ngram (listlengths: 0 to 82k)
[ "\"log_settings\": { \"save_in_file\": {'type':bool}, \"log_folder\": {'type':str}, \"log_filename\":{'type':str}, \"clear_old_log_file\": {'type':bool} }", ": DO NOT CHANGE !!!\"\"\" LOG_FORMAT = \"%(asctime)s: %(levelname)s: %(message)s\"", "str}, \"dss_filename\":{'type':str}, \"export_folder\":{'type':str}, \"start_time\":{'type':str}, \"end_time\":{'type':str}, \"simulation_time_step (minute)\":{'type':int}, \"frequency\": {'type':int,'options':[50,60]}, \"upper_voltage\":", "%(levelname)s: %(message)s\" DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\" MAXITERATIONS = 100 LIFE_PARAMETERS", "\"save_in_file\": False, \"log_filename\":\"\", \"clear_old_log_file\": True } } VALID_SETTINGS = {", "\"end_time\":{'type':str}, \"simulation_time_step (minute)\":{'type':int}, \"frequency\": {'type':int,'options':[50,60]}, \"upper_voltage\": {'type':float,'range':[1,1.5]}, \"lower_voltage\":{'type':float,'range':[0.8,1]}, \"record_every\": {'type':int},", "60, \"frequency\": 50, \"upper_voltage\": 1.1, \"lower_voltage\":0.9, \"record_every\": 4, \"parallel_simulation\":True, \"parallel_process\":", "False, \"log_filename\":\"\", \"clear_old_log_file\": True } } VALID_SETTINGS = { \"project_path\":{'type':str},", "\"enabled\": {'type':bool}, \"yarray\": {'type':list}, \"xarray\": {'type':list} }, \"log_settings\": { \"save_in_file\":", "{'type':list}, \"xarray\": {'type':list} }, \"log_settings\": { \"save_in_file\": {'type':bool}, \"log_folder\": {'type':str},", "%(message)s\" DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\" MAXITERATIONS = 100 LIFE_PARAMETERS =", "\"upper_voltage\": 1.1, \"lower_voltage\":0.9, \"record_every\": 4, \"parallel_simulation\":True, \"parallel_process\": 1, \"export_voltages\": False,", "{ \"dss_filepath\": \"\", \"dss_filename\":\"\", \"extra_data_path\": \".\", \"export_folder\":\"\", \"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-2-1", "\"%Y-%m-%d %H:%M:%S\" MAXITERATIONS = 100 LIFE_PARAMETERS = {\"theta_i\":30,\"theta_fl\":36,\"theta_gfl\":28.6, \"R\":4.87,\"n\":1,\"tau\":3.5,\"m\":1,\"A\":-13.391, \"B\":6972.15,\"num_of_iteration\":4,}", "\"record_every\": 4, \"parallel_simulation\":True, \"parallel_process\": 1, \"export_voltages\": False, \"export_lineloadings\": False, \"export_transloadings\":False,", "= \"%(asctime)s: %(levelname)s: %(message)s\" DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\" MAXITERATIONS =", "\"\", \"export_end_date\": \"\", \"volt_var\": { \"enabled\": True, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\":", "%H:%M:%S\" MAXITERATIONS = 100 LIFE_PARAMETERS = {\"theta_i\":30,\"theta_fl\":36,\"theta_gfl\":28.6, \"R\":4.87,\"n\":1,\"tau\":3.5,\"m\":1,\"A\":-13.391, \"B\":6972.15,\"num_of_iteration\":4,} DEFAULT_TEMP", "DEFAULT_TEMP = 25 MAX_TRANS_LOADING = 1.5 DEFAULT_CONFIGURATION = { \"dss_filepath\":", "\"lower_voltage\":0.9, \"record_every\": 4, \"parallel_simulation\":True, \"parallel_process\": 1, \"export_voltages\": False, \"export_lineloadings\": False,", "= { \"project_path\":{'type':str}, \"active_project\":{'type':str}, \"active_scenario\":{'type':str}, \"dss_filepath\": {'type': str}, \"dss_filename\":{'type':str}, \"export_folder\":{'type':str},", "\"active_project\":{'type':str}, \"active_scenario\":{'type':str}, \"dss_filepath\": {'type': str}, \"dss_filename\":{'type':str}, \"export_folder\":{'type':str}, \"start_time\":{'type':str}, \"end_time\":{'type':str}, \"simulation_time_step", "{\"theta_i\":30,\"theta_fl\":36,\"theta_gfl\":28.6, \"R\":4.87,\"n\":1,\"tau\":3.5,\"m\":1,\"A\":-13.391, \"B\":6972.15,\"num_of_iteration\":4,} DEFAULT_TEMP = 25 
MAX_TRANS_LOADING = 1.5 DEFAULT_CONFIGURATION", "\"end_time\":\"2018-1-2 0:0:0\", \"simulation_time_step (minute)\": 60, \"frequency\": 50, \"upper_voltage\": 1.1, \"lower_voltage\":0.9,", "} } VALID_SETTINGS = { \"project_path\":{'type':str}, \"active_project\":{'type':str}, \"active_scenario\":{'type':str}, \"dss_filepath\": {'type':", "\"B\":6972.15,\"num_of_iteration\":4,} DEFAULT_TEMP = 25 MAX_TRANS_LOADING = 1.5 DEFAULT_CONFIGURATION = {", "(minute)\":{'type':int}, \"frequency\": {'type':int,'options':[50,60]}, \"upper_voltage\": {'type':float,'range':[1,1.5]}, \"lower_voltage\":{'type':float,'range':[0.8,1]}, \"record_every\": {'type':int}, \"extra_data_path\":{'type':str}, \"parallel_simulation\":{'type':bool},", "{ \"project_path\":{'type':str}, \"active_project\":{'type':str}, \"active_scenario\":{'type':str}, \"dss_filepath\": {'type': str}, \"dss_filename\":{'type':str}, \"export_folder\":{'type':str}, \"start_time\":{'type':str},", "{'type':float,'range':[1,1.5]}, \"lower_voltage\":{'type':float,'range':[0.8,1]}, \"record_every\": {'type':int}, \"extra_data_path\":{'type':str}, \"parallel_simulation\":{'type':bool}, \"parallel_process\": {'type':int,'range':[1,4]}, \"export_voltages\": {'type':bool},", "\"parallel_process\": {'type':int,'range':[1,4]}, \"export_voltages\": {'type':bool}, \"export_lineloadings\": {'type':bool}, \"export_transloadings\":{'type':bool}, \"export_start_date\": {'type':str}, \"export_end_date\":", "\"export_voltages\": False, \"export_lineloadings\": False, \"export_transloadings\":False, \"export_start_date\": \"\", \"export_end_date\": \"\", \"volt_var\":", "\"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\": { \"save_in_file\": False, \"log_folder\": \".\", \"log_filename\":\"logs.log\",", "DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\" MAXITERATIONS = 100 LIFE_PARAMETERS = {\"theta_i\":30,\"theta_fl\":36,\"theta_gfl\":28.6,", "\"dss_filepath\": {'type': str}, \"dss_filename\":{'type':str}, \"export_folder\":{'type':str}, \"start_time\":{'type':str}, \"end_time\":{'type':str}, \"simulation_time_step (minute)\":{'type':int}, \"frequency\":", "\"\", \"export_end_date\": \"\", \"volt_var\": { \"enabled\": False, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\":", "= 100 LIFE_PARAMETERS = {\"theta_i\":30,\"theta_fl\":36,\"theta_gfl\":28.6, \"R\":4.87,\"n\":1,\"tau\":3.5,\"m\":1,\"A\":-13.391, \"B\":6972.15,\"num_of_iteration\":4,} DEFAULT_TEMP = 25", "100 LIFE_PARAMETERS = {\"theta_i\":30,\"theta_fl\":36,\"theta_gfl\":28.6, \"R\":4.87,\"n\":1,\"tau\":3.5,\"m\":1,\"A\":-13.391, \"B\":6972.15,\"num_of_iteration\":4,} DEFAULT_TEMP = 25 MAX_TRANS_LOADING", "\"upper_voltage\": 1.1, \"lower_voltage\":0.9, \"record_every\": 96, \"export_voltages\": False, \"export_lineloadings\": False, \"export_transloadings\":False,", "\"start_time\":{'type':str}, \"end_time\":{'type':str}, \"simulation_time_step (minute)\":{'type':int}, \"frequency\": {'type':int,'options':[50,60]}, \"upper_voltage\": {'type':float,'range':[1,1.5]}, \"lower_voltage\":{'type':float,'range':[0.8,1]}, \"record_every\":", "\"R\":4.87,\"n\":1,\"tau\":3.5,\"m\":1,\"A\":-13.391, \"B\":6972.15,\"num_of_iteration\":4,} DEFAULT_TEMP = 25 MAX_TRANS_LOADING = 1.5 DEFAULT_CONFIGURATION =", "} DEFAULT_ADVANCED_CONFIGURATION = { \"project_path\": \"C:\\\\Users\\\\KDUWADI\\\\Desktop\\\\NREL_Projects\\\\CIFF-TANGEDCO\\\\TANGEDCO\\\\EMERGE\\\\Projects\", \"active_project\":\"GR_PALAYAM\", \"active_scenario\": \"FullYear\", \"dss_filename\":\"gr_palayam.dss\",", "\".\", \"export_folder\":\"\", 
\"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-2-1 0:0:0\", \"simulation_time_step (minute)\": 15, \"frequency\":", "\"\", \"dss_filename\":\"\", \"extra_data_path\": \".\", \"export_folder\":\"\", \"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-2-1 0:0:0\", \"simulation_time_step", "False, \"export_lineloadings\": False, \"export_transloadings\":False, \"export_start_date\": \"\", \"export_end_date\": \"\", \"volt_var\": {", "{ \"save_in_file\": False, \"log_folder\": \".\", \"log_filename\":\"logs.log\", \"clear_old_log_file\": True } }", "\"export_transloadings\":False, \"export_start_date\": \"\", \"export_end_date\": \"\", \"volt_var\": { \"enabled\": True, \"yarray\":", "\"log_settings\": { \"save_in_file\": False, \"log_filename\":\"\", \"clear_old_log_file\": True } } VALID_SETTINGS", "{ \"save_in_file\": False, \"log_filename\":\"\", \"clear_old_log_file\": True } } VALID_SETTINGS =", "0:0:0\", \"end_time\":\"2018-1-2 0:0:0\", \"simulation_time_step (minute)\": 60, \"frequency\": 50, \"upper_voltage\": 1.1,", "\"log_settings\": { \"save_in_file\": False, \"log_folder\": \".\", \"log_filename\":\"logs.log\", \"clear_old_log_file\": True }", "LOG_FORMAT = \"%(asctime)s: %(levelname)s: %(message)s\" DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\" MAXITERATIONS", "50, \"upper_voltage\": 1.1, \"lower_voltage\":0.9, \"record_every\": 96, \"export_voltages\": False, \"export_lineloadings\": False,", "CHANGE !!!\"\"\" LOG_FORMAT = \"%(asctime)s: %(levelname)s: %(message)s\" DATE_FORMAT = \"%Y-%m-%d", "} } DEFAULT_ADVANCED_CONFIGURATION = { \"project_path\": \"C:\\\\Users\\\\KDUWADI\\\\Desktop\\\\NREL_Projects\\\\CIFF-TANGEDCO\\\\TANGEDCO\\\\EMERGE\\\\Projects\", \"active_project\":\"GR_PALAYAM\", \"active_scenario\": \"FullYear\",", "[0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\": { \"save_in_file\": False, \"log_folder\": \".\",", "\"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-1-2 0:0:0\", \"simulation_time_step (minute)\": 60, \"frequency\": 50, \"upper_voltage\":", "(minute)\": 60, \"frequency\": 50, \"upper_voltage\": 1.1, \"lower_voltage\":0.9, \"record_every\": 4, \"parallel_simulation\":True,", "\"parallel_simulation\":{'type':bool}, \"parallel_process\": {'type':int,'range':[1,4]}, \"export_voltages\": {'type':bool}, \"export_lineloadings\": {'type':bool}, \"export_transloadings\":{'type':bool}, \"export_start_date\": {'type':str},", "\"simulation_time_step (minute)\": 15, \"frequency\": 50, \"upper_voltage\": 1.1, \"lower_voltage\":0.9, \"record_every\": 96,", "\"enabled\": False, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\": { \"save_in_file\":", "\"xarray\": {'type':list} }, \"log_settings\": { \"save_in_file\": {'type':bool}, \"log_folder\": {'type':str}, \"log_filename\":{'type':str},", "1, \"export_voltages\": False, \"export_lineloadings\": False, \"export_transloadings\":False, \"export_start_date\": \"\", \"export_end_date\": \"\",", "\"C:\\\\Users\\\\KDUWADI\\\\Desktop\\\\NREL_Projects\\\\CIFF-TANGEDCO\\\\TANGEDCO\\\\EMERGE\\\\Projects\", \"active_project\":\"GR_PALAYAM\", \"active_scenario\": \"FullYear\", \"dss_filename\":\"gr_palayam.dss\", \"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-1-2 0:0:0\", \"simulation_time_step", "0:0:0\", \"simulation_time_step (minute)\": 60, \"frequency\": 50, \"upper_voltage\": 1.1, \"lower_voltage\":0.9, \"record_every\":", "\"\", \"volt_var\": { \"enabled\": False, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44], 
\"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] },", "\"export_end_date\": \"\", \"volt_var\": { \"enabled\": True, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3]", "{'type':list} }, \"log_settings\": { \"save_in_file\": {'type':bool}, \"log_folder\": {'type':str}, \"log_filename\":{'type':str}, \"clear_old_log_file\":", "{'type':bool}, \"export_lineloadings\": {'type':bool}, \"export_transloadings\":{'type':bool}, \"export_start_date\": {'type':str}, \"export_end_date\": {'type':str}, \"volt_var\": {", "\"simulation_time_step (minute)\": 60, \"frequency\": 50, \"upper_voltage\": 1.1, \"lower_voltage\":0.9, \"record_every\": 4,", "{ \"enabled\": {'type':bool}, \"yarray\": {'type':list}, \"xarray\": {'type':list} }, \"log_settings\": {", "{ \"save_in_file\": {'type':bool}, \"log_folder\": {'type':str}, \"log_filename\":{'type':str}, \"clear_old_log_file\": {'type':bool} } }", "\"upper_voltage\": {'type':float,'range':[1,1.5]}, \"lower_voltage\":{'type':float,'range':[0.8,1]}, \"record_every\": {'type':int}, \"extra_data_path\":{'type':str}, \"parallel_simulation\":{'type':bool}, \"parallel_process\": {'type':int,'range':[1,4]}, \"export_voltages\":", "96, \"export_voltages\": False, \"export_lineloadings\": False, \"export_transloadings\":False, \"export_start_date\": \"\", \"export_end_date\": \"\",", "\"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\": { \"save_in_file\": False, \"log_filename\":\"\",", "25 MAX_TRANS_LOADING = 1.5 DEFAULT_CONFIGURATION = { \"dss_filepath\": \"\", \"dss_filename\":\"\",", "NOT CHANGE !!!\"\"\" LOG_FORMAT = \"%(asctime)s: %(levelname)s: %(message)s\" DATE_FORMAT =", "{'type':bool}, \"yarray\": {'type':list}, \"xarray\": {'type':list} }, \"log_settings\": { \"save_in_file\": {'type':bool},", "= 1.5 DEFAULT_CONFIGURATION = { \"dss_filepath\": \"\", \"dss_filename\":\"\", \"extra_data_path\": \".\",", "\"dss_filepath\": \"\", \"dss_filename\":\"\", \"extra_data_path\": \".\", \"export_folder\":\"\", \"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-2-1 0:0:0\",", "{'type': str}, \"dss_filename\":{'type':str}, \"export_folder\":{'type':str}, \"start_time\":{'type':str}, \"end_time\":{'type':str}, \"simulation_time_step (minute)\":{'type':int}, \"frequency\": {'type':int,'options':[50,60]},", "\"export_start_date\": {'type':str}, \"export_end_date\": {'type':str}, \"volt_var\": { \"enabled\": {'type':bool}, \"yarray\": {'type':list},", "\"FullYear\", \"dss_filename\":\"gr_palayam.dss\", \"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-1-2 0:0:0\", \"simulation_time_step (minute)\": 60, \"frequency\":", "False, \"export_transloadings\":False, \"export_start_date\": \"\", \"export_end_date\": \"\", \"volt_var\": { \"enabled\": True,", "MAXITERATIONS = 100 LIFE_PARAMETERS = {\"theta_i\":30,\"theta_fl\":36,\"theta_gfl\":28.6, \"R\":4.87,\"n\":1,\"tau\":3.5,\"m\":1,\"A\":-13.391, \"B\":6972.15,\"num_of_iteration\":4,} DEFAULT_TEMP =", "DEFAULT_ADVANCED_CONFIGURATION = { \"project_path\": \"C:\\\\Users\\\\KDUWADI\\\\Desktop\\\\NREL_Projects\\\\CIFF-TANGEDCO\\\\TANGEDCO\\\\EMERGE\\\\Projects\", \"active_project\":\"GR_PALAYAM\", \"active_scenario\": \"FullYear\", \"dss_filename\":\"gr_palayam.dss\", \"start_time\":\"2018-1-1", "= 25 MAX_TRANS_LOADING = 1.5 DEFAULT_CONFIGURATION = { \"dss_filepath\": \"\",", "\"active_scenario\":{'type':str}, \"dss_filepath\": {'type': str}, \"dss_filename\":{'type':str}, \"export_folder\":{'type':str}, \"start_time\":{'type':str}, 
\"end_time\":{'type':str}, \"simulation_time_step (minute)\":{'type':int},", "\"lower_voltage\":0.9, \"record_every\": 96, \"export_voltages\": False, \"export_lineloadings\": False, \"export_transloadings\":False, \"export_start_date\": \"\",", "\"end_time\":\"2018-2-1 0:0:0\", \"simulation_time_step (minute)\": 15, \"frequency\": 50, \"upper_voltage\": 1.1, \"lower_voltage\":0.9,", "\"active_scenario\": \"FullYear\", \"dss_filename\":\"gr_palayam.dss\", \"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-1-2 0:0:0\", \"simulation_time_step (minute)\": 60,", "= {\"theta_i\":30,\"theta_fl\":36,\"theta_gfl\":28.6, \"R\":4.87,\"n\":1,\"tau\":3.5,\"m\":1,\"A\":-13.391, \"B\":6972.15,\"num_of_iteration\":4,} DEFAULT_TEMP = 25 MAX_TRANS_LOADING = 1.5", "True, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\": { \"save_in_file\": False,", "\"frequency\": 50, \"upper_voltage\": 1.1, \"lower_voltage\":0.9, \"record_every\": 4, \"parallel_simulation\":True, \"parallel_process\": 1,", "\"export_folder\":\"\", \"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-2-1 0:0:0\", \"simulation_time_step (minute)\": 15, \"frequency\": 50,", "False, \"log_folder\": \".\", \"log_filename\":\"logs.log\", \"clear_old_log_file\": True } } DEFAULT_ADVANCED_CONFIGURATION =", "}, \"log_settings\": { \"save_in_file\": {'type':bool}, \"log_folder\": {'type':str}, \"log_filename\":{'type':str}, \"clear_old_log_file\": {'type':bool}", "[0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\": { \"save_in_file\": False, \"log_filename\":\"\", \"clear_old_log_file\": True }", "\"\", \"volt_var\": { \"enabled\": True, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] },", "True } } VALID_SETTINGS = { \"project_path\":{'type':str}, \"active_project\":{'type':str}, \"active_scenario\":{'type':str}, \"dss_filepath\":", "\"\"\" Default values : DO NOT CHANGE !!!\"\"\" LOG_FORMAT =", "}, \"log_settings\": { \"save_in_file\": False, \"log_filename\":\"\", \"clear_old_log_file\": True } }", "{'type':int}, \"extra_data_path\":{'type':str}, \"parallel_simulation\":{'type':bool}, \"parallel_process\": {'type':int,'range':[1,4]}, \"export_voltages\": {'type':bool}, \"export_lineloadings\": {'type':bool}, \"export_transloadings\":{'type':bool},", "\"export_lineloadings\": False, \"export_transloadings\":False, \"export_start_date\": \"\", \"export_end_date\": \"\", \"volt_var\": { \"enabled\":", "\"extra_data_path\": \".\", \"export_folder\":\"\", \"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-2-1 0:0:0\", \"simulation_time_step (minute)\": 15,", "\"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\": { \"save_in_file\": False, \"log_folder\":", "\"frequency\": {'type':int,'options':[50,60]}, \"upper_voltage\": {'type':float,'range':[1,1.5]}, \"lower_voltage\":{'type':float,'range':[0.8,1]}, \"record_every\": {'type':int}, \"extra_data_path\":{'type':str}, \"parallel_simulation\":{'type':bool}, \"parallel_process\":", "MAX_TRANS_LOADING = 1.5 DEFAULT_CONFIGURATION = { \"dss_filepath\": \"\", \"dss_filename\":\"\", \"extra_data_path\":", "{'type':bool}, \"export_transloadings\":{'type':bool}, \"export_start_date\": {'type':str}, \"export_end_date\": {'type':str}, \"volt_var\": { \"enabled\": {'type':bool},", "\"parallel_simulation\":True, \"parallel_process\": 1, \"export_voltages\": False, \"export_lineloadings\": False, \"export_transloadings\":False, \"export_start_date\": \"\",", "!!!\"\"\" 
LOG_FORMAT = \"%(asctime)s: %(levelname)s: %(message)s\" DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\"", "\"yarray\": {'type':list}, \"xarray\": {'type':list} }, \"log_settings\": { \"save_in_file\": {'type':bool}, \"log_folder\":", "values : DO NOT CHANGE !!!\"\"\" LOG_FORMAT = \"%(asctime)s: %(levelname)s:", "\"dss_filename\":\"gr_palayam.dss\", \"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-1-2 0:0:0\", \"simulation_time_step (minute)\": 60, \"frequency\": 50,", "\"lower_voltage\":{'type':float,'range':[0.8,1]}, \"record_every\": {'type':int}, \"extra_data_path\":{'type':str}, \"parallel_simulation\":{'type':bool}, \"parallel_process\": {'type':int,'range':[1,4]}, \"export_voltages\": {'type':bool}, \"export_lineloadings\":", "= { \"dss_filepath\": \"\", \"dss_filename\":\"\", \"extra_data_path\": \".\", \"export_folder\":\"\", \"start_time\":\"2018-1-1 0:0:0\",", "\"parallel_process\": 1, \"export_voltages\": False, \"export_lineloadings\": False, \"export_transloadings\":False, \"export_start_date\": \"\", \"export_end_date\":", "} VALID_SETTINGS = { \"project_path\":{'type':str}, \"active_project\":{'type':str}, \"active_scenario\":{'type':str}, \"dss_filepath\": {'type': str},", "DO NOT CHANGE !!!\"\"\" LOG_FORMAT = \"%(asctime)s: %(levelname)s: %(message)s\" DATE_FORMAT", "\"clear_old_log_file\": True } } DEFAULT_ADVANCED_CONFIGURATION = { \"project_path\": \"C:\\\\Users\\\\KDUWADI\\\\Desktop\\\\NREL_Projects\\\\CIFF-TANGEDCO\\\\TANGEDCO\\\\EMERGE\\\\Projects\", \"active_project\":\"GR_PALAYAM\",", "\"clear_old_log_file\": True } } VALID_SETTINGS = { \"project_path\":{'type':str}, \"active_project\":{'type':str}, \"active_scenario\":{'type':str},", "\"export_folder\":{'type':str}, \"start_time\":{'type':str}, \"end_time\":{'type':str}, \"simulation_time_step (minute)\":{'type':int}, \"frequency\": {'type':int,'options':[50,60]}, \"upper_voltage\": {'type':float,'range':[1,1.5]}, \"lower_voltage\":{'type':float,'range':[0.8,1]},", "\"volt_var\": { \"enabled\": {'type':bool}, \"yarray\": {'type':list}, \"xarray\": {'type':list} }, \"log_settings\":", "\"frequency\": 50, \"upper_voltage\": 1.1, \"lower_voltage\":0.9, \"record_every\": 96, \"export_voltages\": False, \"export_lineloadings\":", "\"export_end_date\": \"\", \"volt_var\": { \"enabled\": False, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3]", "True } } DEFAULT_ADVANCED_CONFIGURATION = { \"project_path\": \"C:\\\\Users\\\\KDUWADI\\\\Desktop\\\\NREL_Projects\\\\CIFF-TANGEDCO\\\\TANGEDCO\\\\EMERGE\\\\Projects\", \"active_project\":\"GR_PALAYAM\", \"active_scenario\":", "\"enabled\": True, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\": { \"save_in_file\":", "{'type':str}, \"volt_var\": { \"enabled\": {'type':bool}, \"yarray\": {'type':list}, \"xarray\": {'type':list} },", "0:0:0\", \"simulation_time_step (minute)\": 15, \"frequency\": 50, \"upper_voltage\": 1.1, \"lower_voltage\":0.9, \"record_every\":", "<reponame>NREL/EMeRGE \"\"\" Default values : DO NOT CHANGE !!!\"\"\" LOG_FORMAT", "4, \"parallel_simulation\":True, \"parallel_process\": 1, \"export_voltages\": False, \"export_lineloadings\": False, \"export_transloadings\":False, \"export_start_date\":", "= { \"project_path\": \"C:\\\\Users\\\\KDUWADI\\\\Desktop\\\\NREL_Projects\\\\CIFF-TANGEDCO\\\\TANGEDCO\\\\EMERGE\\\\Projects\", \"active_project\":\"GR_PALAYAM\", \"active_scenario\": \"FullYear\", \"dss_filename\":\"gr_palayam.dss\", \"start_time\":\"2018-1-1 0:0:0\",", "1.1, 
\"lower_voltage\":0.9, \"record_every\": 96, \"export_voltages\": False, \"export_lineloadings\": False, \"export_transloadings\":False, \"export_start_date\":", "\"%(asctime)s: %(levelname)s: %(message)s\" DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\" MAXITERATIONS = 100", "\"log_folder\": \".\", \"log_filename\":\"logs.log\", \"clear_old_log_file\": True } } DEFAULT_ADVANCED_CONFIGURATION = {", "\"record_every\": {'type':int}, \"extra_data_path\":{'type':str}, \"parallel_simulation\":{'type':bool}, \"parallel_process\": {'type':int,'range':[1,4]}, \"export_voltages\": {'type':bool}, \"export_lineloadings\": {'type':bool},", "\".\", \"log_filename\":\"logs.log\", \"clear_old_log_file\": True } } DEFAULT_ADVANCED_CONFIGURATION = { \"project_path\":", "1.5 DEFAULT_CONFIGURATION = { \"dss_filepath\": \"\", \"dss_filename\":\"\", \"extra_data_path\": \".\", \"export_folder\":\"\",", "\"export_end_date\": {'type':str}, \"volt_var\": { \"enabled\": {'type':bool}, \"yarray\": {'type':list}, \"xarray\": {'type':list}", "(minute)\": 15, \"frequency\": 50, \"upper_voltage\": 1.1, \"lower_voltage\":0.9, \"record_every\": 96, \"export_voltages\":", "\"export_start_date\": \"\", \"export_end_date\": \"\", \"volt_var\": { \"enabled\": True, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44],", "\"export_transloadings\":False, \"export_start_date\": \"\", \"export_end_date\": \"\", \"volt_var\": { \"enabled\": False, \"yarray\":", "\"record_every\": 96, \"export_voltages\": False, \"export_lineloadings\": False, \"export_transloadings\":False, \"export_start_date\": \"\", \"export_end_date\":", "\"volt_var\": { \"enabled\": True, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\":", "{ \"enabled\": True, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\": {", "= \"%Y-%m-%d %H:%M:%S\" MAXITERATIONS = 100 LIFE_PARAMETERS = {\"theta_i\":30,\"theta_fl\":36,\"theta_gfl\":28.6, \"R\":4.87,\"n\":1,\"tau\":3.5,\"m\":1,\"A\":-13.391,", "50, \"upper_voltage\": 1.1, \"lower_voltage\":0.9, \"record_every\": 4, \"parallel_simulation\":True, \"parallel_process\": 1, \"export_voltages\":", "DEFAULT_CONFIGURATION = { \"dss_filepath\": \"\", \"dss_filename\":\"\", \"extra_data_path\": \".\", \"export_folder\":\"\", \"start_time\":\"2018-1-1", "\"dss_filename\":{'type':str}, \"export_folder\":{'type':str}, \"start_time\":{'type':str}, \"end_time\":{'type':str}, \"simulation_time_step (minute)\":{'type':int}, \"frequency\": {'type':int,'options':[50,60]}, \"upper_voltage\": {'type':float,'range':[1,1.5]},", "[0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\": { \"save_in_file\": False, \"log_folder\": \".\", \"log_filename\":\"logs.log\", \"clear_old_log_file\":", "{ \"project_path\": \"C:\\\\Users\\\\KDUWADI\\\\Desktop\\\\NREL_Projects\\\\CIFF-TANGEDCO\\\\TANGEDCO\\\\EMERGE\\\\Projects\", \"active_project\":\"GR_PALAYAM\", \"active_scenario\": \"FullYear\", \"dss_filename\":\"gr_palayam.dss\", \"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-1-2", "\"project_path\": \"C:\\\\Users\\\\KDUWADI\\\\Desktop\\\\NREL_Projects\\\\CIFF-TANGEDCO\\\\TANGEDCO\\\\EMERGE\\\\Projects\", \"active_project\":\"GR_PALAYAM\", \"active_scenario\": \"FullYear\", \"dss_filename\":\"gr_palayam.dss\", \"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-1-2 0:0:0\",", "[0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\": { \"save_in_file\": False, \"log_filename\":\"\", \"clear_old_log_file\":", "\"export_voltages\": 
{'type':bool}, \"export_lineloadings\": {'type':bool}, \"export_transloadings\":{'type':bool}, \"export_start_date\": {'type':str}, \"export_end_date\": {'type':str}, \"volt_var\":", "False, \"export_transloadings\":False, \"export_start_date\": \"\", \"export_end_date\": \"\", \"volt_var\": { \"enabled\": False,", "}, \"log_settings\": { \"save_in_file\": False, \"log_folder\": \".\", \"log_filename\":\"logs.log\", \"clear_old_log_file\": True", "\"log_filename\":\"logs.log\", \"clear_old_log_file\": True } } DEFAULT_ADVANCED_CONFIGURATION = { \"project_path\": \"C:\\\\Users\\\\KDUWADI\\\\Desktop\\\\NREL_Projects\\\\CIFF-TANGEDCO\\\\TANGEDCO\\\\EMERGE\\\\Projects\",", "\"volt_var\": { \"enabled\": False, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\":", "{'type':int,'range':[1,4]}, \"export_voltages\": {'type':bool}, \"export_lineloadings\": {'type':bool}, \"export_transloadings\":{'type':bool}, \"export_start_date\": {'type':str}, \"export_end_date\": {'type':str},", "\"export_start_date\": \"\", \"export_end_date\": \"\", \"volt_var\": { \"enabled\": False, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44],", "\"simulation_time_step (minute)\":{'type':int}, \"frequency\": {'type':int,'options':[50,60]}, \"upper_voltage\": {'type':float,'range':[1,1.5]}, \"lower_voltage\":{'type':float,'range':[0.8,1]}, \"record_every\": {'type':int}, \"extra_data_path\":{'type':str},", "{'type':str}, \"export_end_date\": {'type':str}, \"volt_var\": { \"enabled\": {'type':bool}, \"yarray\": {'type':list}, \"xarray\":", "\"project_path\":{'type':str}, \"active_project\":{'type':str}, \"active_scenario\":{'type':str}, \"dss_filepath\": {'type': str}, \"dss_filename\":{'type':str}, \"export_folder\":{'type':str}, \"start_time\":{'type':str}, \"end_time\":{'type':str},", "{ \"enabled\": False, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\": {", "LIFE_PARAMETERS = {\"theta_i\":30,\"theta_fl\":36,\"theta_gfl\":28.6, \"R\":4.87,\"n\":1,\"tau\":3.5,\"m\":1,\"A\":-13.391, \"B\":6972.15,\"num_of_iteration\":4,} DEFAULT_TEMP = 25 MAX_TRANS_LOADING =", "Default values : DO NOT CHANGE !!!\"\"\" LOG_FORMAT = \"%(asctime)s:", "False, \"yarray\": [0.44,0.44,0,0,-0.44,-0.44], \"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\": { \"save_in_file\": False,", "\"xarray\": [0.7,0.90,0.95,1.05,1.10,1.3] }, \"log_settings\": { \"save_in_file\": False, \"log_filename\":\"\", \"clear_old_log_file\": True", "{'type':int,'options':[50,60]}, \"upper_voltage\": {'type':float,'range':[1,1.5]}, \"lower_voltage\":{'type':float,'range':[0.8,1]}, \"record_every\": {'type':int}, \"extra_data_path\":{'type':str}, \"parallel_simulation\":{'type':bool}, \"parallel_process\": {'type':int,'range':[1,4]},", "\"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-2-1 0:0:0\", \"simulation_time_step (minute)\": 15, \"frequency\": 50, \"upper_voltage\":", "\"save_in_file\": False, \"log_folder\": \".\", \"log_filename\":\"logs.log\", \"clear_old_log_file\": True } } DEFAULT_ADVANCED_CONFIGURATION", "\"log_filename\":\"\", \"clear_old_log_file\": True } } VALID_SETTINGS = { \"project_path\":{'type':str}, \"active_project\":{'type':str},", "\"extra_data_path\":{'type':str}, \"parallel_simulation\":{'type':bool}, \"parallel_process\": {'type':int,'range':[1,4]}, \"export_voltages\": {'type':bool}, \"export_lineloadings\": {'type':bool}, \"export_transloadings\":{'type':bool}, \"export_start_date\":", "\"export_transloadings\":{'type':bool}, 
\"export_start_date\": {'type':str}, \"export_end_date\": {'type':str}, \"volt_var\": { \"enabled\": {'type':bool}, \"yarray\":", "15, \"frequency\": 50, \"upper_voltage\": 1.1, \"lower_voltage\":0.9, \"record_every\": 96, \"export_voltages\": False,", "\"active_project\":\"GR_PALAYAM\", \"active_scenario\": \"FullYear\", \"dss_filename\":\"gr_palayam.dss\", \"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-1-2 0:0:0\", \"simulation_time_step (minute)\":", "0:0:0\", \"end_time\":\"2018-2-1 0:0:0\", \"simulation_time_step (minute)\": 15, \"frequency\": 50, \"upper_voltage\": 1.1,", "\"dss_filename\":\"\", \"extra_data_path\": \".\", \"export_folder\":\"\", \"start_time\":\"2018-1-1 0:0:0\", \"end_time\":\"2018-2-1 0:0:0\", \"simulation_time_step (minute)\":", "\"export_lineloadings\": {'type':bool}, \"export_transloadings\":{'type':bool}, \"export_start_date\": {'type':str}, \"export_end_date\": {'type':str}, \"volt_var\": { \"enabled\":", "VALID_SETTINGS = { \"project_path\":{'type':str}, \"active_project\":{'type':str}, \"active_scenario\":{'type':str}, \"dss_filepath\": {'type': str}, \"dss_filename\":{'type':str},", "1.1, \"lower_voltage\":0.9, \"record_every\": 4, \"parallel_simulation\":True, \"parallel_process\": 1, \"export_voltages\": False, \"export_lineloadings\":" ]
[ "numpy as np from copy import copy import colorsys import", "== 1 flaged = observation == 2 observation += self.impact_size", "= max(1, int(bombs_density * self.grid_size)) if n_bombs is None else", "done, {'passed':False} elif case_content == NO_BOMBS_AROUND: self.reveal_around(coords, reward, done) elif", "# Plot infos ## Score score_text = self.score_font.render(\"SCORE\", 1, (255,", "(0.1*self.header_size, 0.45*self.width)) self.window.blit(potential_bombs_left, (0.1*self.header_size, 0.5*self.width)) pygame.display.flip() pygame.time.wait(10) if self.done: pygame.time.wait(3000)", "self.pygame_is_init = True for event in pygame.event.get(): if event.type ==", "self.semi_impact_size + 1) + self.impact_size return x_min, x_max, dx_min, dx_max", "self.num_font.render(str(int(self.time_left+1)), 1, (255, 10, 10)) self.window.blit(time_text, (0.1*self.header_size, 0.03*self.width)) self.window.blit(time_left, (0.1*self.header_size,", "in bombs_ids: bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id] x_min, x_max, dx_min,", "pygame.transform import scale class MinesweeperEnv(Env): def __init__(self, grid_shape=(10, 15), bombs_density=0.1,", "np.array(colorsys.hsv_to_rgb(HUE, 1, 0.7)) return color def _plot_block(self, index, state): position", "+ (0,)] NO_BOMBS_AROUND = 0 reward, done = 0, False", "done, {'passed':False} def reset(self): self.__init__(self.grid_shape, n_bombs=self.n_bombs, impact_size=self.impact_size, max_time=self.max_time, chicken=self.chicken) return", "0 reward, done = 0, False self.time_left = self.max_time -", "if not self.chicken else 'chicken' else: img_key = 'exploded_mine' if", "100 self.origin = np.array([self.header_size, 0]) self.width = int(self.scale_factor * self.BLOCK_SIZE", "(255, 255, 10)) self.window.blit(bombs_text, (0.1*self.header_size, 0.4*self.width)) self.window.blit(left_text, (0.1*self.header_size, 0.45*self.width)) self.window.blit(potential_bombs_left,", "if np.any(np.logical_and(self.state[..., 0]==9, self.state[..., 1]==1)) or self.done: reward, done =", "for bombs_id in bombs_ids: bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id] x_min,", "= 1 unflagged_bombs_around = np.logical_and(region[..., 0] == self.BOMB, region[..., 1]", "* 100 self.origin = np.array([self.header_size, 0]) self.width = int(self.scale_factor *", "= -1, True return self.get_observation(), reward, done, {'passed':False} elif case_content", "+= ['chicken', 'exploded_chicken', 'disabled_chicken'] else: images_names += ['mine', 'exploded_mine', 'disabled_mine']", "+ (0,)] = self.BOMB self.start_time = time() self.time_left = int(time()", "- (content > 9) * self.decimal_font_offset) def _init_pygame(self): pygame.init() #", "self._init_pygame() self.pygame_is_init = True for event in pygame.event.get(): if event.type", "font for numbers num_font_size = 20 self.num_font = pygame.font.SysFont(\"monospace\", int(self.scale_factor", "self.flaged_empty)/self.n_bombs, True self.score += reward return self.get_observation(), reward, done, {'passed':False}", "+ self.font_offset - (content > 9) * self.decimal_font_offset) def _init_pygame(self):", "= np.ones((self.impact_size, self.impact_size), dtype=np.uint8) for bombs_id in bombs_ids: bomb_x, bomb_y", "+ self.semi_impact_size + 1) dx_min, dx_max = x_min - (x", "self.grid_size = np.prod(grid_shape) self.n_bombs = max(1, int(bombs_density * self.grid_size)) if", "reward, done, without_loss=True) self.state[x_min:x_max, y_min:y_max, 1][self.state[x_min:x_max, y_min:y_max, 1] != self.FLAG]", "= np.logical_and(region[..., 0] == 
self.BOMB, region[..., 1] != self.FLAG) if", "= self.scale_factor * 100 self.origin = np.array([self.header_size, 0]) self.width =", "= 2 self.BOMB = self.impact_size ** 2 # Setting up", "{'passed':False} def reset(self): self.__init__(self.grid_shape, n_bombs=self.n_bombs, impact_size=self.impact_size, max_time=self.max_time, chicken=self.chicken) return self.get_observation()", "action): coords = action[:2] action_type = action[2] + 1 #", "= min(self.grid_size - 1, self.n_bombs) self.flaged_bombs = 0 self.flaged_empty =", "reward, done = 2 + self.time_left/self.max_time, True if np.any(np.logical_and(self.state[..., 0]==9,", "self.num_font.render(\"LEFT\", 1, (255, 255, 10)) potential_bombs_left = self.n_bombs - self.flaged_bombs", "elif case_state == self.REVEAL: self.reveal_around(coords, reward, done) reward -= 0.01", "'exploded_chicken' else: img_key = 'revealed' label = self.num_font.render(str(content), 1, self._get_color(content,", "= self.state[coords + (1,)] case_content = self.state[coords + (0,)] NO_BOMBS_AROUND", "# Initalize state self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8) ##", "* np.array(colorsys.hsv_to_rgb(HUE, 1, 0.7)) return color def _plot_block(self, index, state):", "Time left time_text = self.num_font.render(\"TIME\", 1, (255, 10, 10)) self.time_left", "self._get_color(content, self.BOMB)) self.window.blit(self.images[img_key], position) if label: self.window.blit(label, position + self.font_offset", "return flags_around = np.sum(region[..., 1] == 2) if flags_around ==", "not self.chicken else 'disabled_chicken' else: img_key = 'misplaced_flag' else: content", "__init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False): self.grid_shape =", "from pygame.transform import scale class MinesweeperEnv(Env): def __init__(self, grid_shape=(10, 15),", "10)) self.time_left = self.max_time - time() + self.start_time time_left =", "max_n)**3 color = 255 * np.array(colorsys.hsv_to_rgb(HUE, 1, 0.7)) return color", "(0.1*self.header_size, 0.03*self.width)) self.window.blit(time_left, (0.1*self.header_size, 0.1*self.width)) ## Bombs left bombs_text =", "self.window.blit(score, (0.1*self.header_size, 0.8*self.width)) ## Time left time_text = self.num_font.render(\"TIME\", 1,", "Env, spaces from time import time import numpy as np", "self.start_time = time() self.time_left = int(time() - self.start_time) # Setup", "case_state == self.HIDDEN: self.state[coords + (1,)] = action_type if case_content", "= self.HIDDEN else: self.state[coords + (1,)] = self.FLAG if case_content", "self.get_observation() def render(self): if not self.pygame_is_init: self._init_pygame() self.pygame_is_init = True", "position) if label: self.window.blit(label, position + self.font_offset - (content >", "int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[0]) self.height = int(self.scale_factor * self.BLOCK_SIZE", "int(scale_factor*img.get_height()))) images_names = ['hidden', 'revealed', 'flag', 'misplaced_flag'] if self.chicken: images_names", "0.8*self.width)) ## Time left time_text = self.num_font.render(\"TIME\", 1, (255, 10,", "1 self.reveal_around(coord, reward, done, without_loss=True) self.state[x_min:x_max, y_min:y_max, 1][self.state[x_min:x_max, y_min:y_max, 1]", "self.done = True reward, done = -1, True else: if", "255 * np.array(colorsys.hsv_to_rgb(HUE, 1, 0.7)) return color def _plot_block(self, index,", "step(self, action): coords = action[:2] action_type = action[2] + 1", "self.window.blit(time_text, 
(0.1*self.header_size, 0.03*self.width)) self.window.blit(time_left, (0.1*self.header_size, 0.1*self.width)) ## Bombs left bombs_text", "== NO_BOMBS_AROUND: self.reveal_around(coords, reward, done) elif case_state == self.REVEAL: self.reveal_around(coords,", "Load images def scale_image(img, scale_factor=self.scale_factor): return scale(img, (int(scale_factor*img.get_width()), int(scale_factor*img.get_height()))) images_names", "coords = action[:2] action_type = action[2] + 1 # 0", "Place bombs self.state[self.bombs_positions + (0,)] = self.BOMB self.start_time = time()", "self.num_font.render(str(int(potential_bombs_left)), 1, (255, 255, 10)) self.window.blit(bombs_text, (0.1*self.header_size, 0.4*self.width)) self.window.blit(left_text, (0.1*self.header_size,", "-> 2 = toggle_flag case_state = self.state[coords + (1,)] case_content", "elif case_content == NO_BOMBS_AROUND: self.reveal_around(coords, reward, done) elif case_state ==", "2 + self.time_left/self.max_time, True if np.any(np.logical_and(self.state[..., 0]==9, self.state[..., 1]==1)) or", "for img_name in images_names: with pkg_resources.path(images, img_name + '.png') as", "if label: self.window.blit(label, position + self.font_offset - (content > 9)", "pygame.display.set_mode((self.height, self.width)) # Setup font for numbers num_font_size = 20", "10)) self.window.blit(score_text, (0.1*self.header_size, 0.75*self.width)) self.window.blit(score, (0.1*self.header_size, 0.8*self.width)) ## Time left", "self.decimal_font_offset) def _init_pygame(self): pygame.init() # pylint: disable=E1101 # Open Pygame", "_get_color(n, max_n): BLUE_HUE = 0.6 RED_HUE = 0.0 HUE =", "self.state[x_min:x_max, y_min:y_max, 0] bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max] ## Place bombs", "= self.FLAG if case_content == self.BOMB: self.flaged_bombs += flaging else:", "= np.prod(grid_shape) self.n_bombs = max(1, int(bombs_density * self.grid_size)) if n_bombs", "'flag', 'misplaced_flag'] if self.chicken: images_names += ['chicken', 'exploded_chicken', 'disabled_chicken'] else:", "chicken=False): self.grid_shape = grid_shape self.grid_size = np.prod(grid_shape) self.n_bombs = max(1,", "done) reward -= 0.01 else: reward -= 0.001 self.score +=", "10)) potential_bombs_left = self.n_bombs - self.flaged_bombs - self.flaged_empty potential_bombs_left =", "- (x - self.semi_impact_size), x_max - (x + self.semi_impact_size +", "action[2] + 1 # 0 -> 1 = reveal; 1", "copy import copy import colorsys import pygame from pygame.transform import", "self.origin = np.array([self.header_size, 0]) self.width = int(self.scale_factor * self.BLOCK_SIZE *", "12)) # Load images def scale_image(img, scale_factor=self.scale_factor): return scale(img, (int(scale_factor*img.get_width()),", "if n_bombs is None else n_bombs self.n_bombs = min(self.grid_size -", "observation += self.impact_size ** 2 + 1 observation[revealed] = copy(self.state[:,", "0][revealed]) observation[flaged] -= 1 return observation def reveal_around(self, coords, reward,", "1 if case_state == self.FLAG: flaging = -1 self.state[coords +", "(255, 255, 10)) potential_bombs_left = self.n_bombs - self.flaged_bombs - self.flaged_empty", "raise ValueError('Impact_size must be an odd number !') self.impact_size =", "Try backported to PY<37 `importlib_resources`. 
import importlib_resources as pkg_resources from", "size=self.n_bombs, replace=False) self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids] ## Place numbers self.semi_impact_size", "+ (1,)] = self.FLAG if case_content == self.BOMB: self.flaged_bombs +=", "= self.n_bombs - self.flaged_bombs - self.flaged_empty potential_bombs_left = self.num_font.render(str(int(potential_bombs_left)), 1,", "(self.BOMB + 2) * np.ones(self.grid_shape) self.observation_space = spaces.MultiDiscrete(nvec_observation) nvec_action =", "+= bomb_impact[dx_min:dx_max, dy_min:dy_max] ## Place bombs self.state[self.bombs_positions + (0,)] =", "== self.HIDDEN) if np.any(unrevealed_zeros_around): zeros_coords = np.argwhere(unrevealed_zeros_around) for zero in", "= pygame.font.SysFont(\"monospace\", int(self.scale_factor * num_font_size)) self.font_offset = self.scale_factor * self.BLOCK_SIZE", "= 0 def get_observation(self): observation = copy(self.state[:, :, 1]) revealed", "= observation == 1 flaged = observation == 2 observation", "def clip_index(self, x, axis): max_idx = self.grid_shape[axis] x_min, x_max =", "1, (255, 10, 10)) self.window.blit(score_text, (0.1*self.header_size, 0.75*self.width)) self.window.blit(score, (0.1*self.header_size, 0.8*self.width))", "tuple(self.origin + self.scale_factor * self.BLOCK_SIZE * np.array((index[1], index[0]))) label =", "= self.score_font.render(str(round(self.score, 4)), 1, (255, 10, 10)) self.window.blit(score_text, (0.1*self.header_size, 0.75*self.width))", "import colorsys import pygame from pygame.transform import scale class MinesweeperEnv(Env):", "score_text = self.score_font.render(\"SCORE\", 1, (255, 10, 10)) score = self.score_font.render(str(round(self.score,", "img_key = 'flag' else: content = self.state[index][0] if content ==", "self.get_observation(), reward, done, {'passed':False} def reset(self): self.__init__(self.grid_shape, n_bombs=self.n_bombs, impact_size=self.impact_size, max_time=self.max_time,", "if content == self.BOMB: if state == self.HIDDEN: img_key =", "self.chicken = chicken self.done = False self.score = 0 def", "1, (255, 255, 10)) potential_bombs_left = self.n_bombs - self.flaged_bombs -", "(0.1*self.header_size, 0.5*self.width)) pygame.display.flip() pygame.time.wait(10) if self.done: pygame.time.wait(3000) @staticmethod def _get_color(n,", "x-self.semi_impact_size), min(max_idx, x + self.semi_impact_size + 1) dx_min, dx_max =", "== self.REVEAL: if case_state == self.HIDDEN: self.state[coords + (1,)] =", "if self.pygame_is_init: self.done = True reward, done = -1, True", "= int(time() - self.start_time) # Setup rendering self.pygame_is_init = False", "+= reward return self.get_observation(), reward, done, {'passed':True} elif action_type ==", "if not self.chicken else 'exploded_chicken' else: img_key = 'revealed' label", "np.argwhere(unrevealed_zeros_around) for zero in zeros_coords: coord = (x_min + zero[0],", "done, {'passed':False} if action_type == self.REVEAL: if case_state == self.HIDDEN:", "potential_bombs_left = self.n_bombs - self.flaged_bombs - self.flaged_empty potential_bombs_left = self.num_font.render(str(int(potential_bombs_left)),", "def scale_image(img, scale_factor=self.scale_factor): return scale(img, (int(scale_factor*img.get_width()), int(scale_factor*img.get_height()))) images_names = ['hidden',", "self.state[coords + (0,)]: unrevealed_zeros_around = np.logical_and(region[..., 0] == 0, region[...,", "is None else n_bombs self.n_bombs = min(self.grid_size - 1, self.n_bombs)", "2) if flags_around == self.state[coords 
+ (0,)]: unrevealed_zeros_around = np.logical_and(region[...,", "== self.REVEAL: reward -= 0.001 else: flaging = 1 if", "_ = self.clip_index(coords[1], 1) region = self.state[x_min:x_max, y_min:y_max, :] unseen_around", "* self.grid_shape[1] + self.header_size) self.window = pygame.display.set_mode((self.height, self.width)) # Setup", "self.score = 0 def get_observation(self): observation = copy(self.state[:, :, 1])", "if unseen_around == 0: if not without_loss: reward -= 0.001", "= 0 self.flaged_empty = 0 self.max_time = max_time if impact_size", "'misplaced_flag'] if self.chicken: images_names += ['chicken', 'exploded_chicken', 'disabled_chicken'] else: images_names", "y_min:y_max, 1][self.state[x_min:x_max, y_min:y_max, 1] != self.FLAG] = 1 unflagged_bombs_around =", "self.score += reward return self.get_observation(), reward, done, {'passed':False} def reset(self):", "self.BLOCK_SIZE * np.array([0.325, 0.15]) self.decimal_font_offset = self.scale_factor * self.BLOCK_SIZE *", "32 self.header_size = self.scale_factor * 100 self.origin = np.array([self.header_size, 0])", "## Time left time_text = self.num_font.render(\"TIME\", 1, (255, 10, 10))", "n_bombs self.n_bombs = min(self.grid_size - 1, self.n_bombs) self.flaged_bombs = 0", "+ zero[0], y_min + zero[1]) self.state[coord + (1,)] = 1", "0.03*self.width)) self.window.blit(time_left, (0.1*self.header_size, 0.1*self.width)) ## Bombs left bombs_text = self.num_font.render(\"BOMBS\",", "potential_bombs_left = self.num_font.render(str(int(potential_bombs_left)), 1, (255, 255, 10)) self.window.blit(bombs_text, (0.1*self.header_size, 0.4*self.width))", "pylint: disable=E1101 pygame.quit() # pylint: disable=E1101 # Plot background pygame.draw.rect(self.window,", "self.flaged_empty potential_bombs_left = self.num_font.render(str(int(potential_bombs_left)), 1, (255, 255, 10)) self.window.blit(bombs_text, (0.1*self.header_size,", "done, without_loss=True) self.state[x_min:x_max, y_min:y_max, 1][self.state[x_min:x_max, y_min:y_max, 1] != self.FLAG] =", "True if np.any(np.logical_and(self.state[..., 0]==9, self.state[..., 1]==1)) or self.done: reward, done", "self.height, self.width)) # Plot grid for index, state in np.ndenumerate(self.state[...,", "255, 10)) self.window.blit(bombs_text, (0.1*self.header_size, 0.4*self.width)) self.window.blit(left_text, (0.1*self.header_size, 0.45*self.width)) self.window.blit(potential_bombs_left, (0.1*self.header_size,", "Setting up gym Env conventions nvec_observation = (self.BOMB + 2)", "= RED_HUE + (BLUE_HUE - RED_HUE) * ((max_n - n)", "self.BOMB)) self.window.blit(self.images[img_key], position) if label: self.window.blit(label, position + self.font_offset -", "state in np.ndenumerate(self.state[..., 1]): self._plot_block(index, state) # Plot infos ##", "= action_type if case_content == self.BOMB: if self.pygame_is_init: self.done =", "time() + self.start_time time_left = self.num_font.render(str(int(self.time_left+1)), 1, (255, 10, 10))", "= score, True return self.get_observation(), reward, done, {'passed':False} if action_type", "dx_min, dx_max = x_min - (x - self.semi_impact_size), x_max -", "as pkg_resources except ImportError: # Try backported to PY<37 `importlib_resources`.", "from time import time import numpy as np from copy", "gym Env conventions nvec_observation = (self.BOMB + 2) * np.ones(self.grid_shape)", "idx[0][bombs_ids], idx[1][bombs_ids] ## Place numbers self.semi_impact_size = (self.impact_size-1)//2 bomb_impact =", "reward, done, without_loss=False): if not done: x_min, x_max, _, _", "# 
Define constants self.HIDDEN = 0 self.REVEAL = 1 self.FLAG", "- (x + self.semi_impact_size + 1) + self.impact_size return x_min,", "if event.type == pygame.QUIT: # pylint: disable=E1101 pygame.quit() # pylint:", "revealed = observation == 1 flaged = observation == 2", "return self.get_observation(), reward, done, {'passed':False} elif case_content == NO_BOMBS_AROUND: self.reveal_around(coords,", ":, 0][revealed]) observation[flaged] -= 1 return observation def reveal_around(self, coords,", "without_loss: reward -= 0.001 return flags_around = np.sum(region[..., 1] ==", "= 'misplaced_flag' else: content = self.state[index][0] if content == self.BOMB:", "+ (0,)]: unrevealed_zeros_around = np.logical_and(region[..., 0] == 0, region[..., 1]", "15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False): self.grid_shape = grid_shape self.grid_size", "* self.grid_size)) if n_bombs is None else n_bombs self.n_bombs =", "1, (255, 10, 10)) score = self.score_font.render(str(round(self.score, 4)), 1, (255,", "pygame.font.SysFont(\"monospace\", int(self.scale_factor * num_font_size)) self.font_offset = self.scale_factor * self.BLOCK_SIZE *", "self.done = False self.score = 0 def get_observation(self): observation =", "max_time=self.max_time, chicken=self.chicken) return self.get_observation() def render(self): if not self.pygame_is_init: self._init_pygame()", "as pkg_resources from . import images from gym import Env,", "'mine' if not self.chicken else 'chicken' else: img_key = 'exploded_mine'", "self.BLOCK_SIZE * np.array((index[1], index[0]))) label = None if state ==", "y_min + zero[1]) self.state[coord + (1,)] = 1 self.reveal_around(coord, reward,", "+ self.flaged_empty)/self.n_bombs reward, done = score, True return self.get_observation(), reward,", "2 * min(12 / self.grid_shape[0], 25 / self.grid_shape[1]) self.BLOCK_SIZE =", "True self.score += reward return self.get_observation(), reward, done, {'passed':False} def", "import images from gym import Env, spaces from time import", "0: raise ValueError('Impact_size must be an odd number !') self.impact_size", "if self.flaged_bombs == self.n_bombs and self.flaged_empty == 0: reward, done", "== 0, region[..., 1] == self.HIDDEN) if np.any(unrevealed_zeros_around): zeros_coords =", "time_text = self.num_font.render(\"TIME\", 1, (255, 10, 10)) self.time_left = self.max_time", "as np from copy import copy import colorsys import pygame", "= 2 + self.time_left/self.max_time, True if np.any(np.logical_and(self.state[..., 0]==9, self.state[..., 1]==1))", "self.time_left = self.max_time - time() + self.start_time time_left = self.num_font.render(str(int(self.time_left+1)),", "if not self.done: img_key = 'flag' else: content = self.state[index][0]", "bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False): self.grid_shape = grid_shape self.grid_size =", "= 'disabled_mine' if not self.chicken else 'disabled_chicken' else: img_key =", "to PY<37 `importlib_resources`. import importlib_resources as pkg_resources from . 
import", "min(self.grid_size - 1, self.n_bombs) self.flaged_bombs = 0 self.flaged_empty = 0", "if flags_around == self.state[coords + (0,)]: unrevealed_zeros_around = np.logical_and(region[..., 0]", "done = -1, True else: if not without_loss: reward -=", "# 0 -> 1 = reveal; 1 -> 2 =", "MinesweeperEnv(Env): def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False):", "self.state[..., 1]==1)) or self.done: reward, done = -1 + self.time_left/self.max_time", "scale_factor=self.scale_factor): return scale(img, (int(scale_factor*img.get_width()), int(scale_factor*img.get_height()))) images_names = ['hidden', 'revealed', 'flag',", ":, 1]) revealed = observation == 1 flaged = observation", "'revealed', 'flag', 'misplaced_flag'] if self.chicken: images_names += ['chicken', 'exploded_chicken', 'disabled_chicken']", "self.font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.325, 0.15]) self.decimal_font_offset =", "1) + self.impact_size return x_min, x_max, dx_min, dx_max def step(self,", "dtype=np.uint8) for bombs_id in bombs_ids: bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id]", "dx_max = self.clip_index(bomb_x, 0) y_min, y_max, dy_min, dy_max = self.clip_index(bomb_y,", "= None if state == self.HIDDEN and not self.done: img_key", "self.impact_size = impact_size # Define constants self.HIDDEN = 0 self.REVEAL", "= chicken self.done = False self.score = 0 def get_observation(self):", "True for event in pygame.event.get(): if event.type == pygame.QUIT: #", "return self.get_observation(), reward, done, {'passed':False} def reset(self): self.__init__(self.grid_shape, n_bombs=self.n_bombs, impact_size=self.impact_size,", "- n) / max_n)**3 color = 255 * np.array(colorsys.hsv_to_rgb(HUE, 1,", "= idx[0][bombs_ids], idx[1][bombs_ids] ## Place numbers self.semi_impact_size = (self.impact_size-1)//2 bomb_impact", "y_min:y_max, :] unseen_around = np.sum(region[..., 1] == 0) if unseen_around", "np.logical_and(region[..., 0] == self.BOMB, region[..., 1] != self.FLAG) if np.any(unflagged_bombs_around):", "reward, done, {'passed':True} elif action_type == self.FLAG: if case_state ==", "{'passed':True} elif action_type == self.FLAG: if case_state == self.REVEAL: reward", "0: if not without_loss: reward -= 0.001 return flags_around =", "+= flaging else: self.flaged_empty += flaging if self.flaged_bombs == self.n_bombs", "- self.flaged_empty)/self.n_bombs, True self.score += reward return self.get_observation(), reward, done,", "img_key = 'exploded_mine' if not self.chicken else 'exploded_chicken' else: img_key", "= action[:2] action_type = action[2] + 1 # 0 ->", "'exploded_chicken', 'disabled_chicken'] else: images_names += ['mine', 'exploded_mine', 'disabled_mine'] self.images =", "= self.state[x_min:x_max, y_min:y_max, 0] bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max] ## Place", "0.01 else: reward -= 0.001 self.score += reward return self.get_observation(),", "self.BLOCK_SIZE = 32 self.header_size = self.scale_factor * 100 self.origin =", "np.any(unflagged_bombs_around): self.done = True reward, done = -1, True else:", "chicken=self.chicken) return self.get_observation() def render(self): if not self.pygame_is_init: self._init_pygame() self.pygame_is_init", "= 20 self.num_font = pygame.font.SysFont(\"monospace\", int(self.scale_factor * num_font_size)) self.font_offset =", "importlib.resources as pkg_resources except ImportError: # Try backported to PY<37", "self.clip_index(coords[1], 1) region = self.state[x_min:x_max, y_min:y_max, 
:] unseen_around = np.sum(region[...,", "number !') self.impact_size = impact_size # Define constants self.HIDDEN =", "Env conventions nvec_observation = (self.BOMB + 2) * np.ones(self.grid_shape) self.observation_space", "= -1, True else: if not without_loss: reward -= 0.001", "self.width)) # Plot grid for index, state in np.ndenumerate(self.state[..., 1]):", "self.FLAG: if case_state == self.REVEAL: reward -= 0.001 else: flaging", "if case_state == self.REVEAL: reward -= 0.001 else: flaging =", "self.HIDDEN = 0 self.REVEAL = 1 self.FLAG = 2 self.BOMB", "(0.1*self.header_size, 0.75*self.width)) self.window.blit(score, (0.1*self.header_size, 0.8*self.width)) ## Time left time_text =", "case_content == NO_BOMBS_AROUND: self.reveal_around(coords, reward, done) elif case_state == self.REVEAL:", "Setup rendering self.pygame_is_init = False self.chicken = chicken self.done =", "case_content == self.BOMB: if self.pygame_is_init: self.done = True reward, done", "self.flaged_empty += flaging if self.flaged_bombs == self.n_bombs and self.flaged_empty ==", "position = tuple(self.origin + self.scale_factor * self.BLOCK_SIZE * np.array((index[1], index[0])))", "* min(12 / self.grid_shape[0], 25 / self.grid_shape[1]) self.BLOCK_SIZE = 32", "== 0) if unseen_around == 0: if not without_loss: reward", "reward, done, {'passed':False} if action_type == self.REVEAL: if case_state ==", "-= 0.001 else: flaging = 1 if case_state == self.FLAG:", "self.max_time - time() + self.start_time time_left = self.num_font.render(str(int(self.time_left+1)), 1, (255,", "== self.HIDDEN and not self.done: img_key = 'hidden' elif state", "return x_min, x_max, dx_min, dx_max def step(self, action): coords =", "index, state): position = tuple(self.origin + self.scale_factor * self.BLOCK_SIZE *", "done = -1, True return self.get_observation(), reward, done, {'passed':False} elif", "reward, done) reward -= 0.01 else: reward -= 0.001 self.score", "10)) left_text = self.num_font.render(\"LEFT\", 1, (255, 255, 10)) potential_bombs_left =", "unseen_around = np.sum(region[..., 1] == 0) if unseen_around == 0:", "self.scale_factor * self.BLOCK_SIZE * np.array([0.225, 0]) self.score_font = pygame.font.SysFont(\"monospace\", int(self.scale_factor", "reward -= 0.001 return flags_around = np.sum(region[..., 1] == 2)", "else: self.flaged_empty += flaging if self.flaged_bombs == self.n_bombs and self.flaged_empty", "+= flaging if self.flaged_bombs == self.n_bombs and self.flaged_empty == 0:", "self.grid_shape[0]) self.height = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[1] + self.header_size)", "= 1 self.reveal_around(coord, reward, done, without_loss=True) self.state[x_min:x_max, y_min:y_max, 1][self.state[x_min:x_max, y_min:y_max,", "self.num_font.render(str(content), 1, self._get_color(content, self.BOMB)) self.window.blit(self.images[img_key], position) if label: self.window.blit(label, position", "flaging else: self.flaged_empty += flaging if self.flaged_bombs == self.n_bombs and", "not without_loss: reward -= 0.001 return flags_around = np.sum(region[..., 1]", "+ self.start_time if self.time_left <= 0: score = -(self.n_bombs -", "= self.num_font.render(str(int(self.time_left+1)), 1, (255, 10, 10)) self.window.blit(time_text, (0.1*self.header_size, 0.03*self.width)) self.window.blit(time_left,", "copy(self.state[:, :, 1]) revealed = observation == 1 flaged =", "dy_min, dy_max = self.clip_index(bomb_y, 1) bomb_region = self.state[x_min:x_max, y_min:y_max, 0]", "self.state[coords + (1,)] = action_type if case_content == 
self.BOMB: if", "background pygame.draw.rect(self.window, (60, 56, 53), (0, 0, self.height, self.width)) #", "not self.chicken else 'chicken' else: img_key = 'exploded_mine' if not", "observation == 1 flaged = observation == 2 observation +=", "(1,)] = action_type if case_content == self.BOMB: if self.pygame_is_init: self.done", "index, state in np.ndenumerate(self.state[..., 1]): self._plot_block(index, state) # Plot infos", "1 unflagged_bombs_around = np.logical_and(region[..., 0] == self.BOMB, region[..., 1] !=", "def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False): self.grid_shape", "pygame.draw.rect(self.window, (60, 56, 53), (0, 0, self.height, self.width)) # Plot", "** 2 # Setting up gym Env conventions nvec_observation =", "(255, 10, 10)) self.time_left = self.max_time - time() + self.start_time", "max_time=999, chicken=False): self.grid_shape = grid_shape self.grid_size = np.prod(grid_shape) self.n_bombs =", "self.num_font.render(\"BOMBS\", 1, (255, 255, 10)) left_text = self.num_font.render(\"LEFT\", 1, (255,", "HUE = RED_HUE + (BLUE_HUE - RED_HUE) * ((max_n -", "self.BOMB = self.impact_size ** 2 # Setting up gym Env", "case_content = self.state[coords + (0,)] NO_BOMBS_AROUND = 0 reward, done", "1, (255, 255, 10)) left_text = self.num_font.render(\"LEFT\", 1, (255, 255,", "pygame.quit() # pylint: disable=E1101 # Plot background pygame.draw.rect(self.window, (60, 56,", "{} for img_name in images_names: with pkg_resources.path(images, img_name + '.png')", "self.BOMB: if self.pygame_is_init: self.done = True reward, done = -1,", "False self.chicken = chicken self.done = False self.score = 0", "state == self.HIDDEN: img_key = 'mine' if not self.chicken else", "reward return self.get_observation(), reward, done, {'passed':False} def reset(self): self.__init__(self.grid_shape, n_bombs=self.n_bombs,", "action_type == self.REVEAL: if case_state == self.HIDDEN: self.state[coords + (1,)]", "self.n_bombs and self.flaged_empty == 0: reward, done = 2 +", "None else n_bombs self.n_bombs = min(self.grid_size - 1, self.n_bombs) self.flaged_bombs", "x_min - (x - self.semi_impact_size), x_max - (x + self.semi_impact_size", "Pygame window self.scale_factor = 2 * min(12 / self.grid_shape[0], 25", "self.scale_factor = 2 * min(12 / self.grid_shape[0], 25 / self.grid_shape[1])", "== 2) if flags_around == self.state[coords + (0,)]: unrevealed_zeros_around =", "max(1, int(bombs_density * self.grid_size)) if n_bombs is None else n_bombs", "(self.flaged_bombs - self.flaged_empty)/self.n_bombs, True self.score += reward return self.get_observation(), reward,", "disable=E1101 # Plot background pygame.draw.rect(self.window, (60, 56, 53), (0, 0,", "# Plot background pygame.draw.rect(self.window, (60, 56, 53), (0, 0, self.height,", "1 observation[revealed] = copy(self.state[:, :, 0][revealed]) observation[flaged] -= 1 return", "np.any(np.logical_and(self.state[..., 0]==9, self.state[..., 1]==1)) or self.done: reward, done = -1", "RED_HUE + (BLUE_HUE - RED_HUE) * ((max_n - n) /", "else 'exploded_chicken' else: img_key = 'revealed' label = self.num_font.render(str(content), 1,", "== 0: raise ValueError('Impact_size must be an odd number !')", "self.grid_shape[axis] x_min, x_max = max(0, x-self.semi_impact_size), min(max_idx, x + self.semi_impact_size", "= reveal; 1 -> 2 = toggle_flag case_state = self.state[coords", "self.window = pygame.display.set_mode((self.height, self.width)) # Setup font for numbers num_font_size", "== self.BOMB: if 
self.pygame_is_init: self.done = True reward, done =", "-= 0.001 return flags_around = np.sum(region[..., 1] == 2) if", "= action[2] + 1 # 0 -> 1 = reveal;", "np.logical_and(region[..., 0] == 0, region[..., 1] == self.HIDDEN) if np.any(unrevealed_zeros_around):", "img_key = 'revealed' label = self.num_font.render(str(content), 1, self._get_color(content, self.BOMB)) self.window.blit(self.images[img_key],", "region[..., 1] == self.HIDDEN) if np.any(unrevealed_zeros_around): zeros_coords = np.argwhere(unrevealed_zeros_around) for", "= pygame.font.SysFont(\"monospace\", int(self.scale_factor * 12)) # Load images def scale_image(img,", "images_names = ['hidden', 'revealed', 'flag', 'misplaced_flag'] if self.chicken: images_names +=", "<= 0: score = -(self.n_bombs - self.flaged_bombs + self.flaged_empty)/self.n_bombs reward,", "self.chicken else 'exploded_chicken' else: img_key = 'revealed' label = self.num_font.render(str(content),", "action_type if case_content == self.BOMB: if self.pygame_is_init: self.done = True", "'misplaced_flag' else: content = self.state[index][0] if content == self.BOMB: if", "= int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[0]) self.height = int(self.scale_factor *", "else 'chicken' else: img_key = 'exploded_mine' if not self.chicken else", "self.reveal_around(coords, reward, done) elif case_state == self.REVEAL: self.reveal_around(coords, reward, done)", "pygame.time.wait(3000) @staticmethod def _get_color(n, max_n): BLUE_HUE = 0.6 RED_HUE =", "if self.chicken: images_names += ['chicken', 'exploded_chicken', 'disabled_chicken'] else: images_names +=", "= self.state[coords + (0,)] NO_BOMBS_AROUND = 0 reward, done =", "without_loss: reward -= 0.001 def clip_index(self, x, axis): max_idx =", "state): position = tuple(self.origin + self.scale_factor * self.BLOCK_SIZE * np.array((index[1],", "20 self.num_font = pygame.font.SysFont(\"monospace\", int(self.scale_factor * num_font_size)) self.font_offset = self.scale_factor", "x, axis): max_idx = self.grid_shape[axis] x_min, x_max = max(0, x-self.semi_impact_size),", "reveal_around(self, coords, reward, done, without_loss=False): if not done: x_min, x_max,", "pkg_resources from . import images from gym import Env, spaces", "else: img_key = 'exploded_mine' if not self.chicken else 'exploded_chicken' else:", "self.impact_size), dtype=np.uint8) for bombs_id in bombs_ids: bomb_x, bomb_y = idx[0][bombs_id],", "flags_around = np.sum(region[..., 1] == 2) if flags_around == self.state[coords", "in images_names: with pkg_resources.path(images, img_name + '.png') as path: img", "= impact_size # Define constants self.HIDDEN = 0 self.REVEAL =", "# Setting up gym Env conventions nvec_observation = (self.BOMB +", "importlib_resources as pkg_resources from . 
import images from gym import", "- self.flaged_bombs - self.flaged_empty potential_bombs_left = self.num_font.render(str(int(potential_bombs_left)), 1, (255, 255,", "self.done: img_key = 'hidden' elif state == self.FLAG: if not", "(255, 255, 10)) left_text = self.num_font.render(\"LEFT\", 1, (255, 255, 10))", "in zeros_coords: coord = (x_min + zero[0], y_min + zero[1])", "Plot grid for index, state in np.ndenumerate(self.state[..., 1]): self._plot_block(index, state)", "done = -1 + self.time_left/self.max_time + (self.flaged_bombs - self.flaged_empty)/self.n_bombs, True", "== 0: if not without_loss: reward -= 0.001 return flags_around", "= self.grid_shape[axis] x_min, x_max = max(0, x-self.semi_impact_size), min(max_idx, x +", "grid for index, state in np.ndenumerate(self.state[..., 1]): self._plot_block(index, state) #", "255, 10)) potential_bombs_left = self.n_bombs - self.flaged_bombs - self.flaged_empty potential_bombs_left", "pylint: disable=E1101 # Plot background pygame.draw.rect(self.window, (60, 56, 53), (0,", "spaces.MultiDiscrete(nvec_action) # Initalize state self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8)", "* np.ones(self.grid_shape) self.observation_space = spaces.MultiDiscrete(nvec_observation) nvec_action = np.array(self.grid_shape + (2,))", "time import time import numpy as np from copy import", "-1 self.state[coords + (1,)] = self.HIDDEN else: self.state[coords + (1,)]", "np.array([0.225, 0]) self.score_font = pygame.font.SysFont(\"monospace\", int(self.scale_factor * 12)) # Load", "0.001 else: flaging = 1 if case_state == self.FLAG: flaging", "import copy import colorsys import pygame from pygame.transform import scale", "import importlib_resources as pkg_resources from . import images from gym", "bombs_ids: bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id] x_min, x_max, dx_min, dx_max", "images_names += ['chicken', 'exploded_chicken', 'disabled_chicken'] else: images_names += ['mine', 'exploded_mine',", "self.pygame_is_init: self.done = True reward, done = -1, True return", "bomb_impact[dx_min:dx_max, dy_min:dy_max] ## Place bombs self.state[self.bombs_positions + (0,)] = self.BOMB", "def get_observation(self): observation = copy(self.state[:, :, 1]) revealed = observation", "x_max, _, _ = self.clip_index(coords[0], 0) y_min, y_max, _, _", "0, region[..., 1] == self.HIDDEN) if np.any(unrevealed_zeros_around): zeros_coords = np.argwhere(unrevealed_zeros_around)", "import numpy as np from copy import copy import colorsys", "+ (BLUE_HUE - RED_HUE) * ((max_n - n) / max_n)**3", "self.FLAG: if not self.done: img_key = 'flag' else: content =", "0 self.max_time = max_time if impact_size % 2 == 0:", "self.FLAG if case_content == self.BOMB: self.flaged_bombs += flaging else: self.flaged_empty", "## Score score_text = self.score_font.render(\"SCORE\", 1, (255, 10, 10)) score", "self.start_time) # Setup rendering self.pygame_is_init = False self.chicken = chicken", "## Place bombs self.state[self.bombs_positions + (0,)] = self.BOMB self.start_time =", "infos ## Score score_text = self.score_font.render(\"SCORE\", 1, (255, 10, 10))", "bomb_region = self.state[x_min:x_max, y_min:y_max, 0] bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max] ##", "+ self.header_size) self.window = pygame.display.set_mode((self.height, self.width)) # Setup font for", "self.get_observation(), reward, done, {'passed':False} if action_type == self.REVEAL: if case_state", "0.0 HUE = RED_HUE + (BLUE_HUE - RED_HUE) * ((max_n", "zero[0], y_min + zero[1]) self.state[coord + (1,)] = 1 
self.reveal_around(coord,", "self.window.blit(self.images[img_key], position) if label: self.window.blit(label, position + self.font_offset - (content", "pkg_resources.path(images, img_name + '.png') as path: img = pygame.image.load(str(path)).convert() self.images[img_name]", "= self.state[x_min:x_max, y_min:y_max, :] unseen_around = np.sum(region[..., 1] == 0)", "NO_BOMBS_AROUND: self.reveal_around(coords, reward, done) elif case_state == self.REVEAL: self.reveal_around(coords, reward,", "if content == self.BOMB: img_key = 'disabled_mine' if not self.chicken", "self.grid_shape[0], 25 / self.grid_shape[1]) self.BLOCK_SIZE = 32 self.header_size = self.scale_factor", "case_state == self.FLAG: flaging = -1 self.state[coords + (1,)] =", "= self.BOMB self.start_time = time() self.time_left = int(time() - self.start_time)", "np.indices(self.grid_shape).reshape(2, -1) bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs, replace=False) self.bombs_positions = idx[0][bombs_ids],", "case_content == self.BOMB: self.flaged_bombs += flaging else: self.flaged_empty += flaging", "0.7)) return color def _plot_block(self, index, state): position = tuple(self.origin", "0]==9, self.state[..., 1]==1)) or self.done: reward, done = -1 +", "import scale class MinesweeperEnv(Env): def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None,", "zeros_coords: coord = (x_min + zero[0], y_min + zero[1]) self.state[coord", "self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8) ## Setup bombs places", "if not without_loss: reward -= 0.001 return flags_around = np.sum(region[...,", "left time_text = self.num_font.render(\"TIME\", 1, (255, 10, 10)) self.time_left =", "bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs, replace=False) self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids] ##", "def render(self): if not self.pygame_is_init: self._init_pygame() self.pygame_is_init = True for", "score = self.score_font.render(str(round(self.score, 4)), 1, (255, 10, 10)) self.window.blit(score_text, (0.1*self.header_size,", "self.time_left = self.max_time - time() + self.start_time if self.time_left <=", "self.BLOCK_SIZE * self.grid_shape[0]) self.height = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[1]", "@staticmethod def _get_color(n, max_n): BLUE_HUE = 0.6 RED_HUE = 0.0", "class MinesweeperEnv(Env): def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999,", "= idx[0][bombs_id], idx[1][bombs_id] x_min, x_max, dx_min, dx_max = self.clip_index(bomb_x, 0)", "== 2 observation += self.impact_size ** 2 + 1 observation[revealed]", "impact_size % 2 == 0: raise ValueError('Impact_size must be an", "n_bombs=None, impact_size=3, max_time=999, chicken=False): self.grid_shape = grid_shape self.grid_size = np.prod(grid_shape)", "get_observation(self): observation = copy(self.state[:, :, 1]) revealed = observation ==", "+ (2,)) self.action_space = spaces.MultiDiscrete(nvec_action) # Initalize state self.state =", "return self.get_observation(), reward, done, {'passed':True} elif action_type == self.FLAG: if", "time_left = self.num_font.render(str(int(self.time_left+1)), 1, (255, 10, 10)) self.window.blit(time_text, (0.1*self.header_size, 0.03*self.width))", "self.width = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[0]) self.height = int(self.scale_factor", "= self.max_time - time() + self.start_time time_left = self.num_font.render(str(int(self.time_left+1)), 1,", "pygame.QUIT: # pylint: disable=E1101 
pygame.quit() # pylint: disable=E1101 # Plot", "_, _ = self.clip_index(coords[1], 1) region = self.state[x_min:x_max, y_min:y_max, :]", "= 'flag' else: content = self.state[index][0] if content == self.BOMB:", "1 flaged = observation == 2 observation += self.impact_size **", "Setup bombs places idx = np.indices(self.grid_shape).reshape(2, -1) bombs_ids = np.random.choice(range(self.grid_size),", "self.state[coords + (1,)] case_content = self.state[coords + (0,)] NO_BOMBS_AROUND =", "return observation def reveal_around(self, coords, reward, done, without_loss=False): if not", "0) if unseen_around == 0: if not without_loss: reward -=", "0]) self.width = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[0]) self.height =", "== self.FLAG: if not self.done: img_key = 'flag' else: content", "self.state[coords + (0,)] NO_BOMBS_AROUND = 0 reward, done = 0,", "= True for event in pygame.event.get(): if event.type == pygame.QUIT:", "= self.num_font.render(str(content), 1, self._get_color(content, self.BOMB)) self.window.blit(self.images[img_key], position) if label: self.window.blit(label,", "min(max_idx, x + self.semi_impact_size + 1) dx_min, dx_max = x_min", "+ 1) dx_min, dx_max = x_min - (x - self.semi_impact_size),", "-= 1 return observation def reveal_around(self, coords, reward, done, without_loss=False):", "copy import colorsys import pygame from pygame.transform import scale class", "(int(scale_factor*img.get_width()), int(scale_factor*img.get_height()))) images_names = ['hidden', 'revealed', 'flag', 'misplaced_flag'] if self.chicken:", "event.type == pygame.QUIT: # pylint: disable=E1101 pygame.quit() # pylint: disable=E1101", "# pylint: disable=E1101 # Plot background pygame.draw.rect(self.window, (60, 56, 53),", "return self.get_observation() def render(self): if not self.pygame_is_init: self._init_pygame() self.pygame_is_init =", "self.done: img_key = 'flag' else: content = self.state[index][0] if content", "max_time if impact_size % 2 == 0: raise ValueError('Impact_size must", "+ 2) * np.ones(self.grid_shape) self.observation_space = spaces.MultiDiscrete(nvec_observation) nvec_action = np.array(self.grid_shape", "self.pygame_is_init: self._init_pygame() self.pygame_is_init = True for event in pygame.event.get(): if", "content == self.BOMB: img_key = 'disabled_mine' if not self.chicken else", "= -(self.n_bombs - self.flaged_bombs + self.flaged_empty)/self.n_bombs reward, done = score,", "self.n_bombs = min(self.grid_size - 1, self.n_bombs) self.flaged_bombs = 0 self.flaged_empty", "= 32 self.header_size = self.scale_factor * 100 self.origin = np.array([self.header_size,", "scale(img, (int(scale_factor*img.get_width()), int(scale_factor*img.get_height()))) images_names = ['hidden', 'revealed', 'flag', 'misplaced_flag'] if", "1, (255, 10, 10)) self.time_left = self.max_time - time() +", "/ self.grid_shape[0], 25 / self.grid_shape[1]) self.BLOCK_SIZE = 32 self.header_size =", "y_min, y_max, dy_min, dy_max = self.clip_index(bomb_y, 1) bomb_region = self.state[x_min:x_max,", "self.time_left/self.max_time, True if np.any(np.logical_and(self.state[..., 0]==9, self.state[..., 1]==1)) or self.done: reward,", "!= self.FLAG] = 1 unflagged_bombs_around = np.logical_and(region[..., 0] == self.BOMB,", "if not self.pygame_is_init: self._init_pygame() self.pygame_is_init = True for event in", "from copy import copy import colorsys import pygame from pygame.transform", "dtype=np.uint8) ## Setup bombs places idx = np.indices(self.grid_shape).reshape(2, -1) bombs_ids", "y_max, dy_min, dy_max = 
self.clip_index(bomb_y, 1) bomb_region = self.state[x_min:x_max, y_min:y_max,", "gym import Env, spaces from time import time import numpy", "Place numbers self.semi_impact_size = (self.impact_size-1)//2 bomb_impact = np.ones((self.impact_size, self.impact_size), dtype=np.uint8)", "self.window.blit(left_text, (0.1*self.header_size, 0.45*self.width)) self.window.blit(potential_bombs_left, (0.1*self.header_size, 0.5*self.width)) pygame.display.flip() pygame.time.wait(10) if self.done:", "self.semi_impact_size + 1) dx_min, dx_max = x_min - (x -", ". import images from gym import Env, spaces from time", "self.n_bombs) self.flaged_bombs = 0 self.flaged_empty = 0 self.max_time = max_time", "{'passed':False} elif case_content == NO_BOMBS_AROUND: self.reveal_around(coords, reward, done) elif case_state", "((max_n - n) / max_n)**3 color = 255 * np.array(colorsys.hsv_to_rgb(HUE,", "1] != self.FLAG] = 1 unflagged_bombs_around = np.logical_and(region[..., 0] ==", "x_min, x_max, dx_min, dx_max = self.clip_index(bomb_x, 0) y_min, y_max, dy_min,", "reward return self.get_observation(), reward, done, {'passed':True} elif action_type == self.FLAG:", "1, (255, 255, 10)) self.window.blit(bombs_text, (0.1*self.header_size, 0.4*self.width)) self.window.blit(left_text, (0.1*self.header_size, 0.45*self.width))", "'disabled_mine'] self.images = {} for img_name in images_names: with pkg_resources.path(images,", "for event in pygame.event.get(): if event.type == pygame.QUIT: # pylint:", "if self.done: pygame.time.wait(3000) @staticmethod def _get_color(n, max_n): BLUE_HUE = 0.6", "= np.argwhere(unrevealed_zeros_around) for zero in zeros_coords: coord = (x_min +", "if not self.chicken else 'disabled_chicken' else: img_key = 'misplaced_flag' else:", "['chicken', 'exploded_chicken', 'disabled_chicken'] else: images_names += ['mine', 'exploded_mine', 'disabled_mine'] self.images", "# Setup font for numbers num_font_size = 20 self.num_font =", "0 -> 1 = reveal; 1 -> 2 = toggle_flag", "(0,)] = self.BOMB self.start_time = time() self.time_left = int(time() -", "- self.start_time) # Setup rendering self.pygame_is_init = False self.chicken =", ":] unseen_around = np.sum(region[..., 1] == 0) if unseen_around ==", "def reset(self): self.__init__(self.grid_shape, n_bombs=self.n_bombs, impact_size=self.impact_size, max_time=self.max_time, chicken=self.chicken) return self.get_observation() def", "self.flaged_empty == 0: reward, done = 2 + self.time_left/self.max_time, True", "bombs_text = self.num_font.render(\"BOMBS\", 1, (255, 255, 10)) left_text = self.num_font.render(\"LEFT\",", "color = 255 * np.array(colorsys.hsv_to_rgb(HUE, 1, 0.7)) return color def", "# pylint: disable=E1101 # Open Pygame window self.scale_factor = 2", "self.FLAG = 2 self.BOMB = self.impact_size ** 2 # Setting", "else: self.state[coords + (1,)] = self.FLAG if case_content == self.BOMB:", "255, 10)) left_text = self.num_font.render(\"LEFT\", 1, (255, 255, 10)) potential_bombs_left", "- self.flaged_bombs + self.flaged_empty)/self.n_bombs reward, done = score, True return", "1] == 2) if flags_around == self.state[coords + (0,)]: unrevealed_zeros_around", "observation[revealed] = copy(self.state[:, :, 0][revealed]) observation[flaged] -= 1 return observation", "self.score_font = pygame.font.SysFont(\"monospace\", int(self.scale_factor * 12)) # Load images def", "2 self.BOMB = self.impact_size ** 2 # Setting up gym", "import importlib.resources as pkg_resources except ImportError: # Try backported to", "y_max, _, _ = self.clip_index(coords[1], 1) region = 
self.state[x_min:x_max, y_min:y_max,", "Open Pygame window self.scale_factor = 2 * min(12 / self.grid_shape[0],", "_init_pygame(self): pygame.init() # pylint: disable=E1101 # Open Pygame window self.scale_factor", "self.decimal_font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.225, 0]) self.score_font =", "self.score_font.render(str(round(self.score, 4)), 1, (255, 10, 10)) self.window.blit(score_text, (0.1*self.header_size, 0.75*self.width)) self.window.blit(score,", "grid_shape self.grid_size = np.prod(grid_shape) self.n_bombs = max(1, int(bombs_density * self.grid_size))", "left bombs_text = self.num_font.render(\"BOMBS\", 1, (255, 255, 10)) left_text =", "self.header_size = self.scale_factor * 100 self.origin = np.array([self.header_size, 0]) self.width", "= copy(self.state[:, :, 1]) revealed = observation == 1 flaged", "self.flaged_bombs + self.flaged_empty)/self.n_bombs reward, done = score, True return self.get_observation(),", "self.done: reward, done = -1 + self.time_left/self.max_time + (self.flaged_bombs -", "= self.impact_size ** 2 # Setting up gym Env conventions", "self.start_time if self.time_left <= 0: score = -(self.n_bombs - self.flaged_bombs", "self.REVEAL: reward -= 0.001 else: flaging = 1 if case_state", "['mine', 'exploded_mine', 'disabled_mine'] self.images = {} for img_name in images_names:", "= True reward, done = -1, True return self.get_observation(), reward,", "scale class MinesweeperEnv(Env): def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3,", "0 self.flaged_empty = 0 self.max_time = max_time if impact_size %", "= 0.6 RED_HUE = 0.0 HUE = RED_HUE + (BLUE_HUE", "0: score = -(self.n_bombs - self.flaged_bombs + self.flaged_empty)/self.n_bombs reward, done", "with pkg_resources.path(images, img_name + '.png') as path: img = pygame.image.load(str(path)).convert()", "self.grid_shape[1]) self.BLOCK_SIZE = 32 self.header_size = self.scale_factor * 100 self.origin", "1, self.n_bombs) self.flaged_bombs = 0 self.flaged_empty = 0 self.max_time =", "try: import importlib.resources as pkg_resources except ImportError: # Try backported", "if impact_size % 2 == 0: raise ValueError('Impact_size must be", "= np.sum(region[..., 1] == 2) if flags_around == self.state[coords +", "if np.any(unrevealed_zeros_around): zeros_coords = np.argwhere(unrevealed_zeros_around) for zero in zeros_coords: coord", "self.REVEAL: if case_state == self.HIDDEN: self.state[coords + (1,)] = action_type", "odd number !') self.impact_size = impact_size # Define constants self.HIDDEN", "self.header_size) self.window = pygame.display.set_mode((self.height, self.width)) # Setup font for numbers", "== self.REVEAL: self.reveal_around(coords, reward, done) reward -= 0.01 else: reward", "bomb_y = idx[0][bombs_id], idx[1][bombs_id] x_min, x_max, dx_min, dx_max = self.clip_index(bomb_x,", "0.4*self.width)) self.window.blit(left_text, (0.1*self.header_size, 0.45*self.width)) self.window.blit(potential_bombs_left, (0.1*self.header_size, 0.5*self.width)) pygame.display.flip() pygame.time.wait(10) if", "bombs places idx = np.indices(self.grid_shape).reshape(2, -1) bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs,", "True reward, done = -1, True return self.get_observation(), reward, done,", "elif action_type == self.FLAG: if case_state == self.REVEAL: reward -=", "idx[1][bombs_id] x_min, x_max, dx_min, dx_max = self.clip_index(bomb_x, 0) y_min, y_max,", "1] != self.FLAG) if np.any(unflagged_bombs_around): self.done = True reward, done", "pylint: 
disable=E1101 # Open Pygame window self.scale_factor = 2 *", "-= 0.001 def clip_index(self, x, axis): max_idx = self.grid_shape[axis] x_min,", "+= self.impact_size ** 2 + 1 observation[revealed] = copy(self.state[:, :,", "except ImportError: # Try backported to PY<37 `importlib_resources`. import importlib_resources", "self.state[x_min:x_max, y_min:y_max, :] unseen_around = np.sum(region[..., 1] == 0) if", "reset(self): self.__init__(self.grid_shape, n_bombs=self.n_bombs, impact_size=self.impact_size, max_time=self.max_time, chicken=self.chicken) return self.get_observation() def render(self):", "idx = np.indices(self.grid_shape).reshape(2, -1) bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs, replace=False) self.bombs_positions", "img_key = 'misplaced_flag' else: content = self.state[index][0] if content ==", "up gym Env conventions nvec_observation = (self.BOMB + 2) *", "self.num_font = pygame.font.SysFont(\"monospace\", int(self.scale_factor * num_font_size)) self.font_offset = self.scale_factor *", "None if state == self.HIDDEN and not self.done: img_key =", "1]) revealed = observation == 1 flaged = observation ==", "state self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8) ## Setup bombs", "pygame.event.get(): if event.type == pygame.QUIT: # pylint: disable=E1101 pygame.quit() #", "self.state[coord + (1,)] = 1 self.reveal_around(coord, reward, done, without_loss=True) self.state[x_min:x_max,", "= True reward, done = -1, True else: if not", "1) bomb_region = self.state[x_min:x_max, y_min:y_max, 0] bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max]", "content == self.BOMB: if state == self.HIDDEN: img_key = 'mine'", "self.FLAG) if np.any(unflagged_bombs_around): self.done = True reward, done = -1,", "self.grid_shape = grid_shape self.grid_size = np.prod(grid_shape) self.n_bombs = max(1, int(bombs_density", "= self.num_font.render(\"LEFT\", 1, (255, 255, 10)) potential_bombs_left = self.n_bombs -", "self.time_left = int(time() - self.start_time) # Setup rendering self.pygame_is_init =", "self.pygame_is_init = False self.chicken = chicken self.done = False self.score", "if not without_loss: reward -= 0.001 def clip_index(self, x, axis):", "= self.state[index][0] if content == self.BOMB: if state == self.HIDDEN:", "reward, done, {'passed':False} elif case_content == NO_BOMBS_AROUND: self.reveal_around(coords, reward, done)", "state) # Plot infos ## Score score_text = self.score_font.render(\"SCORE\", 1,", "0 self.REVEAL = 1 self.FLAG = 2 self.BOMB = self.impact_size", "self.get_observation(), reward, done, {'passed':False} elif case_content == NO_BOMBS_AROUND: self.reveal_around(coords, reward,", "backported to PY<37 `importlib_resources`. 
import importlib_resources as pkg_resources from .", "numbers num_font_size = 20 self.num_font = pygame.font.SysFont(\"monospace\", int(self.scale_factor * num_font_size))", "np.sum(region[..., 1] == 2) if flags_around == self.state[coords + (0,)]:", "(1,)] case_content = self.state[coords + (0,)] NO_BOMBS_AROUND = 0 reward,", "np.any(unrevealed_zeros_around): zeros_coords = np.argwhere(unrevealed_zeros_around) for zero in zeros_coords: coord =", "dx_max def step(self, action): coords = action[:2] action_type = action[2]", "self.flaged_bombs - self.flaged_empty potential_bombs_left = self.num_font.render(str(int(potential_bombs_left)), 1, (255, 255, 10))", "replace=False) self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids] ## Place numbers self.semi_impact_size =", "def step(self, action): coords = action[:2] action_type = action[2] +", "- time() + self.start_time time_left = self.num_font.render(str(int(self.time_left+1)), 1, (255, 10,", "bombs self.state[self.bombs_positions + (0,)] = self.BOMB self.start_time = time() self.time_left", "10)) self.window.blit(time_text, (0.1*self.header_size, 0.03*self.width)) self.window.blit(time_left, (0.1*self.header_size, 0.1*self.width)) ## Bombs left", "self.state[x_min:x_max, y_min:y_max, 1][self.state[x_min:x_max, y_min:y_max, 1] != self.FLAG] = 1 unflagged_bombs_around", "1][self.state[x_min:x_max, y_min:y_max, 1] != self.FLAG] = 1 unflagged_bombs_around = np.logical_and(region[...,", "render(self): if not self.pygame_is_init: self._init_pygame() self.pygame_is_init = True for event", "# Open Pygame window self.scale_factor = 2 * min(12 /", "Setup font for numbers num_font_size = 20 self.num_font = pygame.font.SysFont(\"monospace\",", "0.15]) self.decimal_font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.225, 0]) self.score_font", "reward, done = -1, True return self.get_observation(), reward, done, {'passed':False}", "spaces.MultiDiscrete(nvec_observation) nvec_action = np.array(self.grid_shape + (2,)) self.action_space = spaces.MultiDiscrete(nvec_action) #", "self.images = {} for img_name in images_names: with pkg_resources.path(images, img_name", "(255, 10, 10)) self.window.blit(time_text, (0.1*self.header_size, 0.03*self.width)) self.window.blit(time_left, (0.1*self.header_size, 0.1*self.width)) ##", "zeros_coords = np.argwhere(unrevealed_zeros_around) for zero in zeros_coords: coord = (x_min", "self.FLAG] = 1 unflagged_bombs_around = np.logical_and(region[..., 0] == self.BOMB, region[...,", "idx[0][bombs_id], idx[1][bombs_id] x_min, x_max, dx_min, dx_max = self.clip_index(bomb_x, 0) y_min,", "np.prod(grid_shape) self.n_bombs = max(1, int(bombs_density * self.grid_size)) if n_bombs is", "img_name in images_names: with pkg_resources.path(images, img_name + '.png') as path:", "for zero in zeros_coords: coord = (x_min + zero[0], y_min", "2 + 1 observation[revealed] = copy(self.state[:, :, 0][revealed]) observation[flaged] -=", "= 0.0 HUE = RED_HUE + (BLUE_HUE - RED_HUE) *", "ImportError: # Try backported to PY<37 `importlib_resources`. 
import importlib_resources as", "observation def reveal_around(self, coords, reward, done, without_loss=False): if not done:", "= -1 self.state[coords + (1,)] = self.HIDDEN else: self.state[coords +", "bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id] x_min, x_max, dx_min, dx_max =", "def _init_pygame(self): pygame.init() # pylint: disable=E1101 # Open Pygame window", "spaces from time import time import numpy as np from", "self.width)) # Setup font for numbers num_font_size = 20 self.num_font", "(1,)] = self.HIDDEN else: self.state[coords + (1,)] = self.FLAG if", "= self.clip_index(bomb_x, 0) y_min, y_max, dy_min, dy_max = self.clip_index(bomb_y, 1)", "done = score, True return self.get_observation(), reward, done, {'passed':False} if", "images from gym import Env, spaces from time import time", "1, (255, 10, 10)) self.window.blit(time_text, (0.1*self.header_size, 0.03*self.width)) self.window.blit(time_left, (0.1*self.header_size, 0.1*self.width))", "* num_font_size)) self.font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.325, 0.15])", "= self.clip_index(bomb_y, 1) bomb_region = self.state[x_min:x_max, y_min:y_max, 0] bomb_region +=", "np.sum(region[..., 1] == 0) if unseen_around == 0: if not", "bomb_impact = np.ones((self.impact_size, self.impact_size), dtype=np.uint8) for bombs_id in bombs_ids: bomb_x,", "not done: x_min, x_max, _, _ = self.clip_index(coords[0], 0) y_min,", "flaging if self.flaged_bombs == self.n_bombs and self.flaged_empty == 0: reward,", "y_min:y_max, 1] != self.FLAG] = 1 unflagged_bombs_around = np.logical_and(region[..., 0]", "int(self.scale_factor * 12)) # Load images def scale_image(img, scale_factor=self.scale_factor): return", "/ max_n)**3 color = 255 * np.array(colorsys.hsv_to_rgb(HUE, 1, 0.7)) return", "x_min, x_max, _, _ = self.clip_index(coords[0], 0) y_min, y_max, _,", "= self.num_font.render(\"BOMBS\", 1, (255, 255, 10)) left_text = self.num_font.render(\"LEFT\", 1,", "self.window.blit(label, position + self.font_offset - (content > 9) * self.decimal_font_offset)", "2 == 0: raise ValueError('Impact_size must be an odd number", "must be an odd number !') self.impact_size = impact_size #", "self.grid_size)) if n_bombs is None else n_bombs self.n_bombs = min(self.grid_size", "self.FLAG: flaging = -1 self.state[coords + (1,)] = self.HIDDEN else:", "self.flaged_bombs = 0 self.flaged_empty = 0 self.max_time = max_time if", "if self.time_left <= 0: score = -(self.n_bombs - self.flaged_bombs +", "self.window.blit(bombs_text, (0.1*self.header_size, 0.4*self.width)) self.window.blit(left_text, (0.1*self.header_size, 0.45*self.width)) self.window.blit(potential_bombs_left, (0.1*self.header_size, 0.5*self.width)) pygame.display.flip()", "* self.BLOCK_SIZE * np.array([0.325, 0.15]) self.decimal_font_offset = self.scale_factor * self.BLOCK_SIZE", "import Env, spaces from time import time import numpy as", "self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids] ## Place numbers self.semi_impact_size = (self.impact_size-1)//2", "/ self.grid_shape[1]) self.BLOCK_SIZE = 32 self.header_size = self.scale_factor * 100", "- self.flaged_empty potential_bombs_left = self.num_font.render(str(int(potential_bombs_left)), 1, (255, 255, 10)) self.window.blit(bombs_text,", "% 2 == 0: raise ValueError('Impact_size must be an odd", "flaging = -1 self.state[coords + (1,)] = self.HIDDEN else: self.state[coords", "0, self.height, self.width)) # Plot grid for index, state in", "self.BLOCK_SIZE * np.array([0.225, 0]) self.score_font = pygame.font.SysFont(\"monospace\", 
int(self.scale_factor * 12))", "* 12)) # Load images def scale_image(img, scale_factor=self.scale_factor): return scale(img,", "np.ndenumerate(self.state[..., 1]): self._plot_block(index, state) # Plot infos ## Score score_text", "'revealed' label = self.num_font.render(str(content), 1, self._get_color(content, self.BOMB)) self.window.blit(self.images[img_key], position) if", "nvec_action = np.array(self.grid_shape + (2,)) self.action_space = spaces.MultiDiscrete(nvec_action) # Initalize", "`importlib_resources`. import importlib_resources as pkg_resources from . import images from", "= np.array(self.grid_shape + (2,)) self.action_space = spaces.MultiDiscrete(nvec_action) # Initalize state", "= -1 + self.time_left/self.max_time + (self.flaged_bombs - self.flaged_empty)/self.n_bombs, True self.score", "-= 0.001 self.score += reward return self.get_observation(), reward, done, {'passed':True}", "+ 1) + self.impact_size return x_min, x_max, dx_min, dx_max def", "case_state == self.REVEAL: reward -= 0.001 else: flaging = 1", "== self.BOMB: self.flaged_bombs += flaging else: self.flaged_empty += flaging if", "pygame.display.flip() pygame.time.wait(10) if self.done: pygame.time.wait(3000) @staticmethod def _get_color(n, max_n): BLUE_HUE", "* self.BLOCK_SIZE * self.grid_shape[0]) self.height = int(self.scale_factor * self.BLOCK_SIZE *", "'disabled_mine' if not self.chicken else 'disabled_chicken' else: img_key = 'misplaced_flag'", "0) y_min, y_max, dy_min, dy_max = self.clip_index(bomb_y, 1) bomb_region =", "done = 2 + self.time_left/self.max_time, True if np.any(np.logical_and(self.state[..., 0]==9, self.state[...,", "Score score_text = self.score_font.render(\"SCORE\", 1, (255, 10, 10)) score =", "done = 0, False self.time_left = self.max_time - time() +", "-1) bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs, replace=False) self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids]", "y_min:y_max, 0] bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max] ## Place bombs self.state[self.bombs_positions", "= self.clip_index(coords[1], 1) region = self.state[x_min:x_max, y_min:y_max, :] unseen_around =", "+ self.scale_factor * self.BLOCK_SIZE * np.array((index[1], index[0]))) label = None", "if not done: x_min, x_max, _, _ = self.clip_index(coords[0], 0)", "1 # 0 -> 1 = reveal; 1 -> 2", "= 'hidden' elif state == self.FLAG: if not self.done: img_key", "= 2 * min(12 / self.grid_shape[0], 25 / self.grid_shape[1]) self.BLOCK_SIZE", "else: flaging = 1 if case_state == self.FLAG: flaging =", "= max(0, x-self.semi_impact_size), min(max_idx, x + self.semi_impact_size + 1) dx_min,", "pygame.font.SysFont(\"monospace\", int(self.scale_factor * 12)) # Load images def scale_image(img, scale_factor=self.scale_factor):", "x_max, dx_min, dx_max def step(self, action): coords = action[:2] action_type", "10, 10)) self.window.blit(time_text, (0.1*self.header_size, 0.03*self.width)) self.window.blit(time_left, (0.1*self.header_size, 0.1*self.width)) ## Bombs", "10, 10)) self.window.blit(score_text, (0.1*self.header_size, 0.75*self.width)) self.window.blit(score, (0.1*self.header_size, 0.8*self.width)) ## Time", "import time import numpy as np from copy import copy", "chicken self.done = False self.score = 0 def get_observation(self): observation", "2 observation += self.impact_size ** 2 + 1 observation[revealed] =", "max(0, x-self.semi_impact_size), min(max_idx, x + self.semi_impact_size + 1) dx_min, dx_max", "+ self.time_left/self.max_time, True if np.any(np.logical_and(self.state[..., 0]==9, 
self.state[..., 1]==1)) or self.done:", "-1, True else: if not without_loss: reward -= 0.001 def", "int(bombs_density * self.grid_size)) if n_bombs is None else n_bombs self.n_bombs", "self.observation_space = spaces.MultiDiscrete(nvec_observation) nvec_action = np.array(self.grid_shape + (2,)) self.action_space =", "== self.n_bombs and self.flaged_empty == 0: reward, done = 2", "(255, 10, 10)) self.window.blit(score_text, (0.1*self.header_size, 0.75*self.width)) self.window.blit(score, (0.1*self.header_size, 0.8*self.width)) ##", "= 255 * np.array(colorsys.hsv_to_rgb(HUE, 1, 0.7)) return color def _plot_block(self,", "np.array((index[1], index[0]))) label = None if state == self.HIDDEN and", "= observation == 2 observation += self.impact_size ** 2 +", "images_names: with pkg_resources.path(images, img_name + '.png') as path: img =", "x_min, x_max, dx_min, dx_max def step(self, action): coords = action[:2]", "'flag' else: content = self.state[index][0] if content == self.BOMB: img_key", "color def _plot_block(self, index, state): position = tuple(self.origin + self.scale_factor", "+ self.start_time time_left = self.num_font.render(str(int(self.time_left+1)), 1, (255, 10, 10)) self.window.blit(time_text,", "grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False): self.grid_shape = grid_shape", "* self.grid_shape[0]) self.height = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[1] +", "label: self.window.blit(label, position + self.font_offset - (content > 9) *", "action_type = action[2] + 1 # 0 -> 1 =", "score = -(self.n_bombs - self.flaged_bombs + self.flaged_empty)/self.n_bombs reward, done =", "== pygame.QUIT: # pylint: disable=E1101 pygame.quit() # pylint: disable=E1101 #", "state == self.FLAG: if not self.done: img_key = 'flag' else:", "= time() self.time_left = int(time() - self.start_time) # Setup rendering", "(0.1*self.header_size, 0.8*self.width)) ## Time left time_text = self.num_font.render(\"TIME\", 1, (255,", "* ((max_n - n) / max_n)**3 color = 255 *", "flaging = 1 if case_state == self.FLAG: flaging = -1", "= ['hidden', 'revealed', 'flag', 'misplaced_flag'] if self.chicken: images_names += ['chicken',", "Plot background pygame.draw.rect(self.window, (60, 56, 53), (0, 0, self.height, self.width))", "* np.array([0.325, 0.15]) self.decimal_font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.225,", "self.impact_size ** 2 + 1 observation[revealed] = copy(self.state[:, :, 0][revealed])", "53), (0, 0, self.height, self.width)) # Plot grid for index,", "+ (2,), dtype=np.uint8) ## Setup bombs places idx = np.indices(self.grid_shape).reshape(2,", "1] == self.HIDDEN) if np.any(unrevealed_zeros_around): zeros_coords = np.argwhere(unrevealed_zeros_around) for zero", "pygame.time.wait(10) if self.done: pygame.time.wait(3000) @staticmethod def _get_color(n, max_n): BLUE_HUE =", "if state == self.HIDDEN and not self.done: img_key = 'hidden'", "not self.done: img_key = 'flag' else: content = self.state[index][0] if", "+ (1,)] = action_type if case_content == self.BOMB: if self.pygame_is_init:", "if case_state == self.FLAG: flaging = -1 self.state[coords + (1,)]", "img_key = 'hidden' elif state == self.FLAG: if not self.done:", "np.ones(self.grid_shape) self.observation_space = spaces.MultiDiscrete(nvec_observation) nvec_action = np.array(self.grid_shape + (2,)) self.action_space", "= self.clip_index(coords[0], 0) y_min, y_max, _, _ = self.clip_index(coords[1], 1)", "if case_content == self.BOMB: if self.pygame_is_init: self.done = 
True reward,", "x_max, dx_min, dx_max = self.clip_index(bomb_x, 0) y_min, y_max, dy_min, dy_max", "self.semi_impact_size), x_max - (x + self.semi_impact_size + 1) + self.impact_size", "copy(self.state[:, :, 0][revealed]) observation[flaged] -= 1 return observation def reveal_around(self,", "time() self.time_left = int(time() - self.start_time) # Setup rendering self.pygame_is_init", "reward -= 0.001 else: flaging = 1 if case_state ==", "(0.1*self.header_size, 0.4*self.width)) self.window.blit(left_text, (0.1*self.header_size, 0.45*self.width)) self.window.blit(potential_bombs_left, (0.1*self.header_size, 0.5*self.width)) pygame.display.flip() pygame.time.wait(10)", "* self.BLOCK_SIZE * np.array([0.225, 0]) self.score_font = pygame.font.SysFont(\"monospace\", int(self.scale_factor *", "self.BOMB self.start_time = time() self.time_left = int(time() - self.start_time) #", "- RED_HUE) * ((max_n - n) / max_n)**3 color =", "(2,)) self.action_space = spaces.MultiDiscrete(nvec_action) # Initalize state self.state = np.zeros(self.grid_shape", "coords, reward, done, without_loss=False): if not done: x_min, x_max, _,", "== self.FLAG: flaging = -1 self.state[coords + (1,)] = self.HIDDEN", "self.HIDDEN: img_key = 'mine' if not self.chicken else 'chicken' else:", "reveal; 1 -> 2 = toggle_flag case_state = self.state[coords +", "0] bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max] ## Place bombs self.state[self.bombs_positions +", "max_idx = self.grid_shape[axis] x_min, x_max = max(0, x-self.semi_impact_size), min(max_idx, x", "self.BLOCK_SIZE * self.grid_shape[1] + self.header_size) self.window = pygame.display.set_mode((self.height, self.width)) #", "= int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[1] + self.header_size) self.window =", "== self.HIDDEN: img_key = 'mine' if not self.chicken else 'chicken'", "coord = (x_min + zero[0], y_min + zero[1]) self.state[coord +", "region = self.state[x_min:x_max, y_min:y_max, :] unseen_around = np.sum(region[..., 1] ==", "conventions nvec_observation = (self.BOMB + 2) * np.ones(self.grid_shape) self.observation_space =", "region[..., 1] != self.FLAG) if np.any(unflagged_bombs_around): self.done = True reward,", "or self.done: reward, done = -1 + self.time_left/self.max_time + (self.flaged_bombs", "dy_max = self.clip_index(bomb_y, 1) bomb_region = self.state[x_min:x_max, y_min:y_max, 0] bomb_region", "if np.any(unflagged_bombs_around): self.done = True reward, done = -1, True", "self.reveal_around(coords, reward, done) reward -= 0.01 else: reward -= 0.001", "disable=E1101 pygame.quit() # pylint: disable=E1101 # Plot background pygame.draw.rect(self.window, (60,", "in np.ndenumerate(self.state[..., 1]): self._plot_block(index, state) # Plot infos ## Score", "np from copy import copy import colorsys import pygame from", "(BLUE_HUE - RED_HUE) * ((max_n - n) / max_n)**3 color", "action[:2] action_type = action[2] + 1 # 0 -> 1", "+ '.png') as path: img = pygame.image.load(str(path)).convert() self.images[img_name] = scale_image(img)", "= (self.impact_size-1)//2 bomb_impact = np.ones((self.impact_size, self.impact_size), dtype=np.uint8) for bombs_id in", "self.BOMB: if state == self.HIDDEN: img_key = 'mine' if not", "'disabled_chicken' else: img_key = 'misplaced_flag' else: content = self.state[index][0] if", "y_min, y_max, _, _ = self.clip_index(coords[1], 1) region = self.state[x_min:x_max,", "import pygame from pygame.transform import scale class MinesweeperEnv(Env): def __init__(self,", "n_bombs is None else n_bombs self.n_bombs = 
min(self.grid_size - 1,", "reward, done) elif case_state == self.REVEAL: self.reveal_around(coords, reward, done) reward", "## Place numbers self.semi_impact_size = (self.impact_size-1)//2 bomb_impact = np.ones((self.impact_size, self.impact_size),", "_, _ = self.clip_index(coords[0], 0) y_min, y_max, _, _ =", "1 -> 2 = toggle_flag case_state = self.state[coords + (1,)]", "window self.scale_factor = 2 * min(12 / self.grid_shape[0], 25 /", "* np.array((index[1], index[0]))) label = None if state == self.HIDDEN", "RED_HUE) * ((max_n - n) / max_n)**3 color = 255", "int(self.scale_factor * num_font_size)) self.font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.325,", "case_state = self.state[coords + (1,)] case_content = self.state[coords + (0,)]", "(1,)] = 1 self.reveal_around(coord, reward, done, without_loss=True) self.state[x_min:x_max, y_min:y_max, 1][self.state[x_min:x_max,", "impact_size=self.impact_size, max_time=self.max_time, chicken=self.chicken) return self.get_observation() def render(self): if not self.pygame_is_init:", "+ self.time_left/self.max_time + (self.flaged_bombs - self.flaged_empty)/self.n_bombs, True self.score += reward", "= spaces.MultiDiscrete(nvec_observation) nvec_action = np.array(self.grid_shape + (2,)) self.action_space = spaces.MultiDiscrete(nvec_action)", "score, True return self.get_observation(), reward, done, {'passed':False} if action_type ==", "== self.BOMB, region[..., 1] != self.FLAG) if np.any(unflagged_bombs_around): self.done =", "x_max - (x + self.semi_impact_size + 1) + self.impact_size return", "self.flaged_bombs += flaging else: self.flaged_empty += flaging if self.flaged_bombs ==", "self.BOMB, region[..., 1] != self.FLAG) if np.any(unflagged_bombs_around): self.done = True", "else: reward -= 0.001 self.score += reward return self.get_observation(), reward,", "def _plot_block(self, index, state): position = tuple(self.origin + self.scale_factor *", "1]): self._plot_block(index, state) # Plot infos ## Score score_text =", "= self.state[index][0] if content == self.BOMB: img_key = 'disabled_mine' if", "'hidden' elif state == self.FLAG: if not self.done: img_key =", "PY<37 `importlib_resources`. import importlib_resources as pkg_resources from . 
import images", "self.state[index][0] if content == self.BOMB: if state == self.HIDDEN: img_key", "# Load images def scale_image(img, scale_factor=self.scale_factor): return scale(img, (int(scale_factor*img.get_width()), int(scale_factor*img.get_height())))", "1, 0.7)) return color def _plot_block(self, index, state): position =", "else n_bombs self.n_bombs = min(self.grid_size - 1, self.n_bombs) self.flaged_bombs =", "# Plot grid for index, state in np.ndenumerate(self.state[..., 1]): self._plot_block(index,", "self.scale_factor * self.BLOCK_SIZE * np.array((index[1], index[0]))) label = None if", "and not self.done: img_key = 'hidden' elif state == self.FLAG:", "-1, True return self.get_observation(), reward, done, {'passed':False} elif case_content ==", "bombs_id in bombs_ids: bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id] x_min, x_max,", "toggle_flag case_state = self.state[coords + (1,)] case_content = self.state[coords +", "0.45*self.width)) self.window.blit(potential_bombs_left, (0.1*self.header_size, 0.5*self.width)) pygame.display.flip() pygame.time.wait(10) if self.done: pygame.time.wait(3000) @staticmethod", "0] == 0, region[..., 1] == self.HIDDEN) if np.any(unrevealed_zeros_around): zeros_coords", "True else: if not without_loss: reward -= 0.001 def clip_index(self,", "1 self.FLAG = 2 self.BOMB = self.impact_size ** 2 #", "disable=E1101 # Open Pygame window self.scale_factor = 2 * min(12", "- 1, self.n_bombs) self.flaged_bombs = 0 self.flaged_empty = 0 self.max_time", "Initalize state self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8) ## Setup", "(1,)] = self.FLAG if case_content == self.BOMB: self.flaged_bombs += flaging", "colorsys import pygame from pygame.transform import scale class MinesweeperEnv(Env): def", "= np.zeros(self.grid_shape + (2,), dtype=np.uint8) ## Setup bombs places idx", "reward, done = -1, True else: if not without_loss: reward", "= 1 self.FLAG = 2 self.BOMB = self.impact_size ** 2", "pkg_resources except ImportError: # Try backported to PY<37 `importlib_resources`. 
import", "self.window.blit(score_text, (0.1*self.header_size, 0.75*self.width)) self.window.blit(score, (0.1*self.header_size, 0.8*self.width)) ## Time left time_text", "self.HIDDEN else: self.state[coords + (1,)] = self.FLAG if case_content ==", "2 # Setting up gym Env conventions nvec_observation = (self.BOMB", "-= 0.01 else: reward -= 0.001 self.score += reward return", "{'passed':False} if action_type == self.REVEAL: if case_state == self.HIDDEN: self.state[coords", "True reward, done = -1, True else: if not without_loss:", "self.REVEAL = 1 self.FLAG = 2 self.BOMB = self.impact_size **", "self.max_time - time() + self.start_time if self.time_left <= 0: score", "= (x_min + zero[0], y_min + zero[1]) self.state[coord + (1,)]", "+ (1,)] = self.HIDDEN else: self.state[coords + (1,)] = self.FLAG", "min(12 / self.grid_shape[0], 25 / self.grid_shape[1]) self.BLOCK_SIZE = 32 self.header_size", "not without_loss: reward -= 0.001 def clip_index(self, x, axis): max_idx", "self.flaged_empty)/self.n_bombs reward, done = score, True return self.get_observation(), reward, done,", "56, 53), (0, 0, self.height, self.width)) # Plot grid for", "+ self.impact_size return x_min, x_max, dx_min, dx_max def step(self, action):", "self.state[coords + (1,)] = self.FLAG if case_content == self.BOMB: self.flaged_bombs", "- self.semi_impact_size), x_max - (x + self.semi_impact_size + 1) +", "self.done: pygame.time.wait(3000) @staticmethod def _get_color(n, max_n): BLUE_HUE = 0.6 RED_HUE", "max_n): BLUE_HUE = 0.6 RED_HUE = 0.0 HUE = RED_HUE", "def _get_color(n, max_n): BLUE_HUE = 0.6 RED_HUE = 0.0 HUE", "Define constants self.HIDDEN = 0 self.REVEAL = 1 self.FLAG =", "impact_size # Define constants self.HIDDEN = 0 self.REVEAL = 1", "in pygame.event.get(): if event.type == pygame.QUIT: # pylint: disable=E1101 pygame.quit()", "'exploded_mine', 'disabled_mine'] self.images = {} for img_name in images_names: with", "= 0 self.REVEAL = 1 self.FLAG = 2 self.BOMB =", "x_min, x_max = max(0, x-self.semi_impact_size), min(max_idx, x + self.semi_impact_size +", "images_names += ['mine', 'exploded_mine', 'disabled_mine'] self.images = {} for img_name", "!= self.FLAG) if np.any(unflagged_bombs_around): self.done = True reward, done =", "np.array([self.header_size, 0]) self.width = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[0]) self.height", "0.001 self.score += reward return self.get_observation(), reward, done, {'passed':True} elif", "dx_max = x_min - (x - self.semi_impact_size), x_max - (x", "self.flaged_empty = 0 self.max_time = max_time if impact_size % 2", "(x_min + zero[0], y_min + zero[1]) self.state[coord + (1,)] =", "scale_image(img, scale_factor=self.scale_factor): return scale(img, (int(scale_factor*img.get_width()), int(scale_factor*img.get_height()))) images_names = ['hidden', 'revealed',", "= self.score_font.render(\"SCORE\", 1, (255, 10, 10)) score = self.score_font.render(str(round(self.score, 4)),", "10)) score = self.score_font.render(str(round(self.score, 4)), 1, (255, 10, 10)) self.window.blit(score_text,", "BLUE_HUE = 0.6 RED_HUE = 0.0 HUE = RED_HUE +", "not self.chicken else 'exploded_chicken' else: img_key = 'revealed' label =", "if action_type == self.REVEAL: if case_state == self.HIDDEN: self.state[coords +", "= 'revealed' label = self.num_font.render(str(content), 1, self._get_color(content, self.BOMB)) self.window.blit(self.images[img_key], position)", "9) * self.decimal_font_offset) def _init_pygame(self): pygame.init() # pylint: disable=E1101 #", "self.grid_shape[1] + self.header_size) 
try:
    import importlib.resources as pkg_resources
except ImportError:
    # Try backported to PY<37 `importlib_resources`.
    import importlib_resources as pkg_resources

from . import images

from gym import Env, spaces
from time import time

import colorsys
import numpy as np
from copy import copy

import pygame
from pygame.transform import scale


class MinesweeperEnv(Env):

    def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False):
        self.grid_shape = grid_shape
        self.grid_size = np.prod(grid_shape)
        self.n_bombs = max(1, int(bombs_density * self.grid_size)) if n_bombs is None else n_bombs
        self.flaged_bombs = 0
        self.flaged_empty = 0
        self.max_time = max_time

        if impact_size % 2 == 0:
            raise ValueError('Impact_size must be an odd number!')
        self.impact_size = impact_size

        # Define constants
        self.HIDDEN = 0
        self.REVEAL = 1
        self.FLAG = 2
        self.BOMB = self.impact_size ** 2

        # Setting up gym Env conventions
        nvec_observation = (self.BOMB + 2) * np.ones(self.grid_shape)
        self.observation_space = spaces.MultiDiscrete(nvec_observation)

        nvec_action = np.array(self.grid_shape + (2,))
        self.action_space = spaces.MultiDiscrete(nvec_action)

        # Initialize state
        self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8)

        ## Setup bombs places
        idx = np.indices(self.grid_shape).reshape(2, -1)
        bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs, replace=False)
        self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids]

        ## Place numbers
        self.semi_impact_size = (self.impact_size - 1) // 2
        bomb_impact = np.ones((self.impact_size, self.impact_size), dtype=np.uint8)
        for bombs_id in bombs_ids:
            bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id]
            x_min, x_max, dx_min, dx_max = self.clip_index(bomb_x, 0)
            y_min, y_max, dy_min, dy_max = self.clip_index(bomb_y, 1)
            bomb_region = self.state[x_min:x_max, y_min:y_max, 0]
            bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max]

        ## Place bombs
        self.state[self.bombs_positions + (0,)] = self.BOMB
        self.start_time = time()
        self.time_left = int(time() - self.start_time)

        # Setup rendering
        self.pygame_is_init = False
        self.chicken = chicken
        self.done = False
        self.score = 0

    def get_observation(self):
        observation = copy(self.state[:, :, 1])

        revealed = observation == 1
        flaged = observation == 2

        observation += self.impact_size ** 2 + 1
        observation[revealed] = copy(self.state[:, :, 0][revealed])
        observation[flaged] -= 1

        return observation

    def reveal_around(self, coords, reward, done, without_loss=False):
        if not done:
            x_min, x_max, _, _ = self.clip_index(coords[0], 0)
            y_min, y_max, _, _ = self.clip_index(coords[1], 1)

            region = self.state[x_min:x_max, y_min:y_max, :]
            unseen_around = np.sum(region[..., 1] == 0)
            if unseen_around == 0:
                if not without_loss:
                    reward -= 0.001
                return

            flags_around = np.sum(region[..., 1] == 2)
            if flags_around == self.state[coords + (0,)]:
                unrevealed_zeros_around = np.logical_and(region[..., 0] == 0, region[..., 1] == self.HIDDEN)
                if np.any(unrevealed_zeros_around):
                    zeros_coords = np.argwhere(unrevealed_zeros_around)
                    for zero in zeros_coords:
                        coord = (x_min + zero[0], y_min + zero[1])
                        self.state[coord + (1,)] = 1
                        self.reveal_around(coord, reward, done, without_loss=True)
                self.state[x_min:x_max, y_min:y_max, 1][self.state[x_min:x_max, y_min:y_max, 1] != self.FLAG] = 1

                unflagged_bombs_around = np.logical_and(region[..., 0] == self.BOMB, region[..., 1] != self.FLAG)
                if np.any(unflagged_bombs_around):
                    self.done = True
            else:
                if not without_loss:
                    reward -= 0.001

    def clip_index(self, x, axis):
        max_idx = self.grid_shape[axis]
        x_min, x_max = max(0, x - self.semi_impact_size), min(max_idx, x + self.semi_impact_size + 1)
        dx_min, dx_max = x_min - (x - self.semi_impact_size), x_max - (x + self.semi_impact_size + 1) + self.impact_size
        return x_min, x_max, dx_min, dx_max

    def step(self, action):
        coords = action[:2]
        action_type = action[2] + 1  # 0 -> 1 = reveal; 1 -> 2 = toggle_flag

        case_state = self.state[coords + (1,)]
        case_content = self.state[coords + (0,)]

        NO_BOMBS_AROUND = 0
        reward, done = 0, False

        self.time_left = self.max_time - time() + self.start_time
        if self.time_left <= 0:
            score = -(self.n_bombs - self.flaged_bombs + self.flaged_empty) / self.n_bombs
            reward, done = score, True
            return self.get_observation(), reward, done, {'passed': False}

        if action_type == self.REVEAL:
            if case_state == self.HIDDEN:
                self.state[coords + (1,)] = action_type
                if case_content == self.BOMB:
                    self.done = True
                    reward, done = -1, True
                    return self.get_observation(), reward, done, {'passed': False}
                elif case_content == NO_BOMBS_AROUND:
                    self.reveal_around(coords, reward, done)
            elif case_state == self.REVEAL:
                self.reveal_around(coords, reward, done)
                reward -= 0.01
            else:
                reward -= 0.001

            self.score += reward
            return self.get_observation(), reward, done, {'passed': True}

        elif action_type == self.FLAG:
            if case_state == self.REVEAL:
                reward -= 0.001
            else:
                flaging = 1
                if case_state == self.FLAG:
                    flaging = -1
                    self.state[coords + (1,)] = self.HIDDEN
                else:
                    self.state[coords + (1,)] = self.FLAG

                if case_content == self.BOMB:
                    self.flaged_bombs += flaging
                else:
                    self.flaged_empty += flaging

        if self.flaged_bombs == self.n_bombs and self.flaged_empty == 0:
            reward, done = 2 + self.time_left / self.max_time, True

        if np.any(np.logical_and(self.state[..., 0] == self.BOMB, self.state[..., 1] == 1)) or self.done:
            reward, done = -1 + self.time_left / self.max_time + (self.flaged_bombs - self.flaged_empty) / self.n_bombs, True

        self.score += reward
        return self.get_observation(), reward, done, {'passed': False}

    def reset(self):
        self.__init__(self.grid_shape, n_bombs=self.n_bombs, impact_size=self.impact_size, max_time=self.max_time, chicken=self.chicken)
        return self.get_observation()

    def render(self):
        if not self.pygame_is_init:
            self._init_pygame()
            self.pygame_is_init = True

        for event in pygame.event.get():
            if event.type == pygame.QUIT:  # pylint: disable=E1101
                pygame.quit()  # pylint: disable=E1101

        # Plot background
        pygame.draw.rect(self.window, (60, 56, 53), (0, 0, self.height, self.width))

        # Plot grid
        for index, state in np.ndenumerate(self.state[..., 1]):
            self._plot_block(index, state)

        # Plot infos
        ## Score
        score_text = self.score_font.render("SCORE", 1, (255, 10, 10))
        score = self.score_font.render(str(round(self.score, 4)), 1, (255, 10, 10))
        self.window.blit(score_text, (0.1 * self.header_size, 0.75 * self.width))
        self.window.blit(score, (0.1 * self.header_size, 0.8 * self.width))

        ## Time left
        time_text = self.num_font.render("TIME", 1, (255, 10, 10))
        self.time_left = self.max_time - time() + self.start_time
        time_left = self.num_font.render(str(int(self.time_left + 1)), 1, (255, 10, 10))
        self.window.blit(time_text, (0.1 * self.header_size, 0.05 * self.width))
        self.window.blit(time_left, (0.1 * self.header_size, 0.1 * self.width))

        ## Bombs left
        bombs_text = self.num_font.render("BOMBS", 1, (255, 255, 10))
        left_text = self.num_font.render("LEFT", 1, (255, 255, 10))
        potential_bombs_left = self.n_bombs - self.flaged_bombs - self.flaged_empty
        potential_bombs_left = self.num_font.render(str(int(potential_bombs_left)), 1, (255, 255, 10))
        self.window.blit(bombs_text, (0.1 * self.header_size, 0.4 * self.width))
        self.window.blit(left_text, (0.1 * self.header_size, 0.45 * self.width))
        self.window.blit(potential_bombs_left, (0.1 * self.header_size, 0.5 * self.width))

        pygame.display.flip()
        pygame.time.wait(10)
        if self.done:
            pygame.time.wait(3000)

    @staticmethod
    def _get_color(n, max_n):
        BLUE_HUE = 0.6
        RED_HUE = 0.0
        HUE = RED_HUE + (BLUE_HUE - RED_HUE) * ((max_n - n) / max_n) ** 3
        color = 255 * np.array(colorsys.hsv_to_rgb(HUE, 1, 0.7))
        return color

    def _plot_block(self, index, state):
        position = tuple(self.origin + self.scale_factor * self.BLOCK_SIZE * np.array((index[1], index[0])))
        label = None
        if state == self.HIDDEN and not self.done:
            img_key = 'hidden'
        elif state == self.FLAG:
            if not self.done:
                img_key = 'flag'
            else:
                content = self.state[index][0]
                if content == self.BOMB:
                    img_key = 'disabled_mine' if not self.chicken else 'disabled_chicken'
                else:
                    img_key = 'misplaced_flag'
        else:
            content = self.state[index][0]
            if content == self.BOMB:
                if state == self.HIDDEN:
                    img_key = 'mine' if not self.chicken else 'chicken'
                else:
                    img_key = 'exploded_mine' if not self.chicken else 'exploded_chicken'
            else:
                img_key = 'revealed'
                label = self.num_font.render(str(content), 1, self._get_color(content, self.BOMB))

        self.window.blit(self.images[img_key], position)
        if label:
            self.window.blit(label, position + self.font_offset - (content > 9) * self.decimal_font_offset)

    def _init_pygame(self):
        pygame.init()  # pylint: disable=E1101

        # Open Pygame window
        self.scale_factor = min(1, 25 / self.grid_shape[1])
        self.BLOCK_SIZE = 32
        self.header_size = self.scale_factor * 100
        self.origin = np.array([self.header_size, 0])
        self.width = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[0])
        self.height = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[1] + self.header_size)
        self.window = pygame.display.set_mode((self.height, self.width))

        # Setup font for numbers
        num_font_size = 20
        self.num_font = pygame.font.SysFont("monospace", int(self.scale_factor * num_font_size))
        self.font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.325, 0.15])
        self.decimal_font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.225, 0])
        self.score_font = pygame.font.SysFont("monospace", int(self.scale_factor * 12))

        # Load images
        def scale_image(img, scale_factor=self.scale_factor):
            return scale(img, (int(scale_factor * img.get_width()), int(scale_factor * img.get_height())))

        images_names = ['hidden', 'revealed', 'flag', 'misplaced_flag']
        if self.chicken:
            images_names += ['chicken', 'exploded_chicken', 'disabled_chicken']
        else:
            images_names += ['mine', 'exploded_mine', 'disabled_mine']

        self.images = {}
        for img_name in images_names:
            with pkg_resources.path(images, img_name + '.png') as path:
                img = pygame.image.load(str(path)).convert()
                self.images[img_name] = scale_image(img)
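# Usage sketch (not part of the original module): a random-agent rollout,
# assuming MinesweeperEnv is imported from its package (the relative `images`
# import means the module itself must live inside that package). Actions are
# (row, col, action_type) triples: action_type 0 reveals, 1 toggles a flag,
# and `step` returns the usual gym (observation, reward, done, info) 4-tuple.
def random_rollout(n_steps=50):
    env = MinesweeperEnv(grid_shape=(8, 8), n_bombs=5)  # arbitrary small board
    env.reset()
    total_reward = 0
    for _ in range(n_steps):
        action = (np.random.randint(env.grid_shape[0]),
                  np.random.randint(env.grid_shape[1]),
                  np.random.randint(2))
        _, reward, done, _ = env.step(action)
        total_reward += reward
        if done:
            break
    return total_reward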
[ "file, You can obtain one at http://mozilla.org/MPL/2.0/. from gaiatest import", "def test_unlock_to_camera_with_passcode(self): # https://github.com/mozilla/gaia-ui-tests/issues/479 camera = self.lock_screen.unlock_to_camera() self.lock_screen.wait_for_lockscreen_not_visible() camera.switch_to_camera_frame() self.assertFalse(camera.is_gallery_button_visible)", "the Mozilla Public # License, v. 2.0. If a copy", "= '<PASSWORD>' def setUp(self): GaiaTestCase.setUp(self) # Turn off geolocation prompt", "the MPL was not distributed with this # file, You", "'deny') self.data_layer.set_setting('lockscreen.passcode-lock.code', self._input_passcode) self.data_layer.set_setting('lockscreen.passcode-lock.enabled', True) # this time we need", "= LockScreen(self.marionette) def test_unlock_to_camera_with_passcode(self): # https://github.com/mozilla/gaia-ui-tests/issues/479 camera = self.lock_screen.unlock_to_camera() self.lock_screen.wait_for_lockscreen_not_visible()", "can obtain one at http://mozilla.org/MPL/2.0/. from gaiatest import GaiaTestCase from", "geolocation prompt self.apps.set_permission('System', 'geolocation', 'deny') self.data_layer.set_setting('lockscreen.passcode-lock.code', self._input_passcode) self.data_layer.set_setting('lockscreen.passcode-lock.enabled', True) #", "was not distributed with this # file, You can obtain", "time we need it locked! self.lockscreen.lock() self.lock_screen = LockScreen(self.marionette) def", "TestCameraUnlockWithPasscode(GaiaTestCase): # Input data _input_passcode = '<PASSWORD>' def setUp(self): GaiaTestCase.setUp(self)", "one at http://mozilla.org/MPL/2.0/. from gaiatest import GaiaTestCase from gaiatest.apps.lockscreen.app import", "at http://mozilla.org/MPL/2.0/. from gaiatest import GaiaTestCase from gaiatest.apps.lockscreen.app import LockScreen", "a copy of the MPL was not distributed with this", "from gaiatest.apps.lockscreen.app import LockScreen class TestCameraUnlockWithPasscode(GaiaTestCase): # Input data _input_passcode", "setUp(self): GaiaTestCase.setUp(self) # Turn off geolocation prompt self.apps.set_permission('System', 'geolocation', 'deny')", "2.0. If a copy of the MPL was not distributed", "http://mozilla.org/MPL/2.0/. from gaiatest import GaiaTestCase from gaiatest.apps.lockscreen.app import LockScreen class", "self.lockscreen.lock() self.lock_screen = LockScreen(self.marionette) def test_unlock_to_camera_with_passcode(self): # https://github.com/mozilla/gaia-ui-tests/issues/479 camera =", "locked! self.lockscreen.lock() self.lock_screen = LockScreen(self.marionette) def test_unlock_to_camera_with_passcode(self): # https://github.com/mozilla/gaia-ui-tests/issues/479 camera", "is subject to the terms of the Mozilla Public #", "subject to the terms of the Mozilla Public # License,", "the terms of the Mozilla Public # License, v. 2.0.", "gaiatest import GaiaTestCase from gaiatest.apps.lockscreen.app import LockScreen class TestCameraUnlockWithPasscode(GaiaTestCase): #", "gaiatest.apps.lockscreen.app import LockScreen class TestCameraUnlockWithPasscode(GaiaTestCase): # Input data _input_passcode =", "Public # License, v. 2.0. 
If a copy of the", "Form is subject to the terms of the Mozilla Public", "# This Source Code Form is subject to the terms", "'geolocation', 'deny') self.data_layer.set_setting('lockscreen.passcode-lock.code', self._input_passcode) self.data_layer.set_setting('lockscreen.passcode-lock.enabled', True) # this time we", "off geolocation prompt self.apps.set_permission('System', 'geolocation', 'deny') self.data_layer.set_setting('lockscreen.passcode-lock.code', self._input_passcode) self.data_layer.set_setting('lockscreen.passcode-lock.enabled', True)", "v. 2.0. If a copy of the MPL was not", "obtain one at http://mozilla.org/MPL/2.0/. from gaiatest import GaiaTestCase from gaiatest.apps.lockscreen.app", "self.data_layer.set_setting('lockscreen.passcode-lock.enabled', True) # this time we need it locked! self.lockscreen.lock()", "<filename>tests/python/gaia-ui-tests/gaiatest/tests/functional/lockscreen/test_lockscreen_unlock_to_camera_with_passcode.py # This Source Code Form is subject to the", "# file, You can obtain one at http://mozilla.org/MPL/2.0/. from gaiatest", "Code Form is subject to the terms of the Mozilla", "'<PASSWORD>' def setUp(self): GaiaTestCase.setUp(self) # Turn off geolocation prompt self.apps.set_permission('System',", "from gaiatest import GaiaTestCase from gaiatest.apps.lockscreen.app import LockScreen class TestCameraUnlockWithPasscode(GaiaTestCase):", "This Source Code Form is subject to the terms of", "True) # this time we need it locked! self.lockscreen.lock() self.lock_screen", "# Turn off geolocation prompt self.apps.set_permission('System', 'geolocation', 'deny') self.data_layer.set_setting('lockscreen.passcode-lock.code', self._input_passcode)", "terms of the Mozilla Public # License, v. 2.0. If", "of the MPL was not distributed with this # file,", "If a copy of the MPL was not distributed with", "Source Code Form is subject to the terms of the", "Turn off geolocation prompt self.apps.set_permission('System', 'geolocation', 'deny') self.data_layer.set_setting('lockscreen.passcode-lock.code', self._input_passcode) self.data_layer.set_setting('lockscreen.passcode-lock.enabled',", "not distributed with this # file, You can obtain one", "def setUp(self): GaiaTestCase.setUp(self) # Turn off geolocation prompt self.apps.set_permission('System', 'geolocation',", "this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from", "this time we need it locked! self.lockscreen.lock() self.lock_screen = LockScreen(self.marionette)", "we need it locked! self.lockscreen.lock() self.lock_screen = LockScreen(self.marionette) def test_unlock_to_camera_with_passcode(self):", "copy of the MPL was not distributed with this #", "class TestCameraUnlockWithPasscode(GaiaTestCase): # Input data _input_passcode = '<PASSWORD>' def setUp(self):", "LockScreen class TestCameraUnlockWithPasscode(GaiaTestCase): # Input data _input_passcode = '<PASSWORD>' def", "need it locked! 
self.lockscreen.lock() self.lock_screen = LockScreen(self.marionette) def test_unlock_to_camera_with_passcode(self): #", "# Input data _input_passcode = '<PASSWORD>' def setUp(self): GaiaTestCase.setUp(self) #", "test_unlock_to_camera_with_passcode(self): # https://github.com/mozilla/gaia-ui-tests/issues/479 camera = self.lock_screen.unlock_to_camera() self.lock_screen.wait_for_lockscreen_not_visible() camera.switch_to_camera_frame() self.assertFalse(camera.is_gallery_button_visible) camera.tap_switch_source()", "self._input_passcode) self.data_layer.set_setting('lockscreen.passcode-lock.enabled', True) # this time we need it locked!", "data _input_passcode = '<PASSWORD>' def setUp(self): GaiaTestCase.setUp(self) # Turn off", "it locked! self.lockscreen.lock() self.lock_screen = LockScreen(self.marionette) def test_unlock_to_camera_with_passcode(self): # https://github.com/mozilla/gaia-ui-tests/issues/479", "LockScreen(self.marionette) def test_unlock_to_camera_with_passcode(self): # https://github.com/mozilla/gaia-ui-tests/issues/479 camera = self.lock_screen.unlock_to_camera() self.lock_screen.wait_for_lockscreen_not_visible() camera.switch_to_camera_frame()", "to the terms of the Mozilla Public # License, v.", "Input data _input_passcode = '<PASSWORD>' def setUp(self): GaiaTestCase.setUp(self) # Turn", "self.apps.set_permission('System', 'geolocation', 'deny') self.data_layer.set_setting('lockscreen.passcode-lock.code', self._input_passcode) self.data_layer.set_setting('lockscreen.passcode-lock.enabled', True) # this time", "https://github.com/mozilla/gaia-ui-tests/issues/479 camera = self.lock_screen.unlock_to_camera() self.lock_screen.wait_for_lockscreen_not_visible() camera.switch_to_camera_frame() self.assertFalse(camera.is_gallery_button_visible) camera.tap_switch_source() camera.wait_for_capture_ready() self.assertFalse(camera.is_gallery_button_visible)", "with this # file, You can obtain one at http://mozilla.org/MPL/2.0/.", "MPL was not distributed with this # file, You can", "self.lock_screen = LockScreen(self.marionette) def test_unlock_to_camera_with_passcode(self): # https://github.com/mozilla/gaia-ui-tests/issues/479 camera = self.lock_screen.unlock_to_camera()", "distributed with this # file, You can obtain one at", "import GaiaTestCase from gaiatest.apps.lockscreen.app import LockScreen class TestCameraUnlockWithPasscode(GaiaTestCase): # Input", "self.data_layer.set_setting('lockscreen.passcode-lock.code', self._input_passcode) self.data_layer.set_setting('lockscreen.passcode-lock.enabled', True) # this time we need it", "prompt self.apps.set_permission('System', 'geolocation', 'deny') self.data_layer.set_setting('lockscreen.passcode-lock.code', self._input_passcode) self.data_layer.set_setting('lockscreen.passcode-lock.enabled', True) # this", "License, v. 2.0. If a copy of the MPL was", "# License, v. 2.0. If a copy of the MPL", "GaiaTestCase.setUp(self) # Turn off geolocation prompt self.apps.set_permission('System', 'geolocation', 'deny') self.data_layer.set_setting('lockscreen.passcode-lock.code',", "GaiaTestCase from gaiatest.apps.lockscreen.app import LockScreen class TestCameraUnlockWithPasscode(GaiaTestCase): # Input data", "of the Mozilla Public # License, v. 2.0. If a", "import LockScreen class TestCameraUnlockWithPasscode(GaiaTestCase): # Input data _input_passcode = '<PASSWORD>'", "# this time we need it locked! 
self.lockscreen.lock() self.lock_screen =", "_input_passcode = '<PASSWORD>' def setUp(self): GaiaTestCase.setUp(self) # Turn off geolocation", "You can obtain one at http://mozilla.org/MPL/2.0/. from gaiatest import GaiaTestCase", "Mozilla Public # License, v. 2.0. If a copy of", "# https://github.com/mozilla/gaia-ui-tests/issues/479 camera = self.lock_screen.unlock_to_camera() self.lock_screen.wait_for_lockscreen_not_visible() camera.switch_to_camera_frame() self.assertFalse(camera.is_gallery_button_visible) camera.tap_switch_source() camera.wait_for_capture_ready()" ]
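# Note on running this test (illustrative; the exact flags depend on the
# installed runner version): gaia-ui-tests are executed through the `gaiatest`
# entry point against a Marionette-enabled B2G build, along the lines of
#
#   gaiatest --address=localhost:2828 --testvars=testvars.json \
#       test_lockscreen_unlock_to_camera_with_passcode.py
#
# The '<PASSWORD>' placeholder above is a redaction in the source; any
# passcode string accepted by the lockscreen would fill the same role.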
[ "to make #edges x 5 then pad to desired size", "\"\"\" def __init__(self, in_channels, out_channels, k=5, bias=True): super(MeshConv, self).__init__() self.conv", "Gi = Gi + 1 #shift # first flatten indices", "#edges x 4 add the edge_id itself to make #edges", "edge features (Batch x Features x Edges) mesh: list of", "# apply the symmetric functions for an equivariant convolution x_1", "xsz, device): \"\"\" extracts one-ring neighbors (4x) -> m.gemm_edges which", "convolution \"\"\" def __init__(self, in_channels, out_channels, k=5, bias=True): super(MeshConv, self).__init__()", "symmetric functions for an equivariant convolution x_1 = f[:, :,", "device=Gi.device).float() / ne).view(b, ne) add_fac = batch_n * ne add_fac", "f def pad_gemm(self, m, xsz, device): \"\"\" extracts one-ring neighbors", "ne += 1 batch_n = torch.floor(torch.arange(b * ne, device=Gi.device).float() /", ":, :, 4] x_3 = torch.abs(f[:, :, :, 1] -", "add_fac[:, 1:, :] return Gi def create_GeMM(self, x, Gi): \"\"\"", "= F.pad(padded_gemm, (0, 0, 0, xsz - m.edges_count), \"constant\", 0)", "Gi + 1 #shift # first flatten indices Gi_flat =", "= torch.cat((padding, x), dim=2) Gi = Gi + 1 #shift", "padded section of x so padded section never used f", "x, mesh): x = x.squeeze(-1) # pad gemm G =", "ne) add_fac = batch_n * ne add_fac = add_fac.view(b, ne,", "x_3 = torch.abs(f[:, :, :, 1] - f[:, :, :,", "m.gemm_edges which is of size #edges x 4 add the", "create_GeMM(self, x, Gi): \"\"\" gathers the edge features (x) with", ":, :, 1] + f[:, :, :, 3] x_2 =", "a 'fake image' which can use 2d convolution on output", "+ f[:, :, :, 3] x_2 = f[:, :, :,", "x so padded section never used f = torch.index_select(x, dim=0,", "and 4 incident (1-ring) edge neighbors in the forward pass", "k=5, bias=True): super(MeshConv, self).__init__() self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, k),", "= torch.stack([f[:, :, :, 0], x_1, x_2, x_3, x_4], dim=3)", "f.view(Gishape[0], Gishape[1], Gishape[2], -1) f = f.permute(0, 3, 1, 2)", "\"\"\" Computes convolution between edges and 4 incident (1-ring) edge", "Gi Gi = Gi.float() + add_fac[:, 1:, :] return Gi", "never used f = torch.index_select(x, dim=0, index=Gi_flat) f = f.view(Gishape[0],", "forward(self, x, mesh): x = x.squeeze(-1) # pad gemm G", "f[:, :, :, 3]) x_4 = torch.abs(f[:, :, :, 2]", "features (x) with from the 1-ring indices (Gi) applys symmetric", ":, 2] - f[:, :, :, 4]) f = torch.stack([f[:,", "f[:, :, :, 3] x_2 = f[:, :, :, 2]", "= x.shape x = x.permute(0, 2, 1).contiguous() x = x.view(odim[0]", "self).__init__() self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, k), bias=bias) self.k =", "requires_grad=True, device=x.device) # add zero feature vector then shift all", "torch.cat((padding, x), dim=2) Gi = Gi + 1 #shift #", "Gi_flat.view(-1).long() # odim = x.shape x = x.permute(0, 2, 1).contiguous()", "1), requires_grad=True, device=x.device) # add zero feature vector then shift", "list of mesh data-structure (len(mesh) == Batch) and applies convolution", ":, :, 0], x_1, x_2, x_3, x_4], dim=3) return f", "(4x) -> m.gemm_edges which is of size #edges x 4", "3, 1, 2) # apply the symmetric functions for an", "device=device).float() padded_gemm = padded_gemm.requires_grad_() padded_gemm = torch.cat((torch.arange(int(m.edges_count), device=device).float().unsqueeze(1), padded_gemm), dim=1)", "dim=2) Gi = Gi + 1 #shift # first flatten", "of gemm never reference padded section of x so padded", "0) # build 
'neighborhood image' and apply convolution G =", "2) # apply the symmetric functions for an equivariant convolution", "gemm never reference padded section of x so padded section", "Gi): (b, ne, nn) = Gi.shape ne += 1 batch_n", "= torch.tensor(m.gemm_edges, device=device).float() padded_gemm = padded_gemm.requires_grad_() padded_gemm = torch.cat((torch.arange(int(m.edges_count), device=device).float().unsqueeze(1),", "x_2 = f[:, :, :, 2] + f[:, :, :,", "-> m.gemm_edges which is of size #edges x 4 add", "batch_n * ne add_fac = add_fac.view(b, ne, 1) add_fac =", "return x def flatten_gemm_inds(self, Gi): (b, ne, nn) = Gi.shape", "out_channels=out_channels, kernel_size=(1, k), bias=bias) self.k = k def forward(self, x,", "between edges and 4 incident (1-ring) edge neighbors in the", "with zeros padding = torch.zeros((x.shape[0], x.shape[1], 1), requires_grad=True, device=x.device) #", "torch import torch.nn as nn import torch.nn.functional as F class", "1, 2) # apply the symmetric functions for an equivariant", "import torch import torch.nn as nn import torch.nn.functional as F", "Gi_flat = self.flatten_gemm_inds(Gi) Gi_flat = Gi_flat.view(-1).long() # odim = x.shape", "data-structure (len(mesh) == Batch) and applies convolution \"\"\" def __init__(self,", "-1) f = f.permute(0, 3, 1, 2) # apply the", "feature vector then shift all indices. border edges now reference", "ne, 1) add_fac = add_fac.repeat(1, 1, nn) # flatten Gi", "of mesh data-structure (len(mesh) == Batch) and applies convolution \"\"\"", ":, 0], x_1, x_2, x_3, x_4], dim=3) return f def", "f.permute(0, 3, 1, 2) # apply the symmetric functions for", "index=Gi_flat) f = f.view(Gishape[0], Gishape[1], Gishape[2], -1) f = f.permute(0,", "# pad the first row of every sample in batch", "the first row of every sample in batch with zeros", "indices of gemm never reference padded section of x so", "x Channels x Edges x 5 \"\"\" Gishape = Gi.shape", "x 4 add the edge_id itself to make #edges x", "Gi def create_GeMM(self, x, Gi): \"\"\" gathers the edge features", "extracts one-ring neighbors (4x) -> m.gemm_edges which is of size", "* ne, device=Gi.device).float() / ne).view(b, ne) add_fac = batch_n *", "the 1-ring indices (Gi) applys symmetric functions to handle order", "convolution between edges and 4 incident (1-ring) edge neighbors in", "x def flatten_gemm_inds(self, Gi): (b, ne, nn) = Gi.shape ne", ":, :, 2] + f[:, :, :, 4] x_3 =", "/ ne).view(b, ne) add_fac = batch_n * ne add_fac =", "itself to make #edges x 5 then pad to desired", "as nn import torch.nn.functional as F class MeshConv(nn.Module): \"\"\" Computes", "(Batch x Features x Edges) mesh: list of mesh data-structure", ":, 1] - f[:, :, :, 3]) x_4 = torch.abs(f[:,", "5 then pad to desired size e.g., xsz x 5", "mesh data-structure (len(mesh) == Batch) and applies convolution \"\"\" def", "def forward(self, x, mesh): x = x.squeeze(-1) # pad gemm", "apply the symmetric functions for an equivariant convolution x_1 =", "pad using F padded_gemm = F.pad(padded_gemm, (0, 0, 0, xsz", "torch.nn as nn import torch.nn.functional as F class MeshConv(nn.Module): \"\"\"", "every sample in batch with zeros padding = torch.zeros((x.shape[0], x.shape[1],", "functions to handle order invariance returns a 'fake image' which", "G) x = self.conv(G) return x def flatten_gemm_inds(self, Gi): (b,", "= torch.floor(torch.arange(b * ne, device=Gi.device).float() / ne).view(b, ne) add_fac =", "== Batch) and applies convolution \"\"\" def __init__(self, in_channels, out_channels,", "x_2, x_3, x_4], dim=3) 
return f def pad_gemm(self, m, xsz,", "Edges x 5 \"\"\" Gishape = Gi.shape # pad the", "zero feature vector then shift all indices. border edges now", "in the forward pass takes: x: edge features (Batch x", "4]) f = torch.stack([f[:, :, :, 0], x_1, x_2, x_3,", "used f = torch.index_select(x, dim=0, index=Gi_flat) f = f.view(Gishape[0], Gishape[1],", "padded_gemm = torch.tensor(m.gemm_edges, device=device).float() padded_gemm = padded_gemm.requires_grad_() padded_gemm = torch.cat((torch.arange(int(m.edges_count),", "def __init__(self, in_channels, out_channels, k=5, bias=True): super(MeshConv, self).__init__() self.conv =", "x = torch.cat((padding, x), dim=2) Gi = Gi + 1", "G = self.create_GeMM(x, G) x = self.conv(G) return x def", "def flatten_gemm_inds(self, Gi): (b, ne, nn) = Gi.shape ne +=", "= Gi.shape ne += 1 batch_n = torch.floor(torch.arange(b * ne,", "+= 1 batch_n = torch.floor(torch.arange(b * ne, device=Gi.device).float() / ne).view(b,", "edge_id itself to make #edges x 5 then pad to", "self.conv(G) return x def flatten_gemm_inds(self, Gi): (b, ne, nn) =", "2, 1).contiguous() x = x.view(odim[0] * odim[2], odim[1]) # indices", "= f.view(Gishape[0], Gishape[1], Gishape[2], -1) f = f.permute(0, 3, 1,", "add_fac.repeat(1, 1, nn) # flatten Gi Gi = Gi.float() +", "(b, ne, nn) = Gi.shape ne += 1 batch_n =", "torch.abs(f[:, :, :, 2] - f[:, :, :, 4]) f", "as F class MeshConv(nn.Module): \"\"\" Computes convolution between edges and", "2] - f[:, :, :, 4]) f = torch.stack([f[:, :,", "torch.zeros((x.shape[0], x.shape[1], 1), requires_grad=True, device=x.device) # add zero feature vector", "f[:, :, :, 4] x_3 = torch.abs(f[:, :, :, 1]", "x.squeeze(-1) # pad gemm G = torch.cat([self.pad_gemm(i, x.shape[2], x.device) for", "# odim = x.shape x = x.permute(0, 2, 1).contiguous() x", "dim=0, index=Gi_flat) f = f.view(Gishape[0], Gishape[1], Gishape[2], -1) f =", "Gishape[2], -1) f = f.permute(0, 3, 1, 2) # apply", ":, :, 4]) f = torch.stack([f[:, :, :, 0], x_1,", "= f[:, :, :, 2] + f[:, :, :, 4]", "with from the 1-ring indices (Gi) applys symmetric functions to", "Gishape = Gi.shape # pad the first row of every", "x.shape x = x.permute(0, 2, 1).contiguous() x = x.view(odim[0] *", "= add_fac.repeat(1, 1, nn) # flatten Gi Gi = Gi.float()", "return Gi def create_GeMM(self, x, Gi): \"\"\" gathers the edge", "Edges) mesh: list of mesh data-structure (len(mesh) == Batch) and", "mesh], 0) # build 'neighborhood image' and apply convolution G", ":, 4] x_3 = torch.abs(f[:, :, :, 1] - f[:,", "import torch.nn.functional as F class MeshConv(nn.Module): \"\"\" Computes convolution between", "add zero feature vector then shift all indices. 
border edges", "Gi): \"\"\" gathers the edge features (x) with from the", "MeshConv(nn.Module): \"\"\" Computes convolution between edges and 4 incident (1-ring)", "= Gi.shape # pad the first row of every sample", "x.permute(0, 2, 1).contiguous() x = x.view(odim[0] * odim[2], odim[1]) #", "+ 1 #shift # first flatten indices Gi_flat = self.flatten_gemm_inds(Gi)", "order invariance returns a 'fake image' which can use 2d", "__init__(self, in_channels, out_channels, k=5, bias=True): super(MeshConv, self).__init__() self.conv = nn.Conv2d(in_channels=in_channels,", "dimensions: Batch x Channels x Edges x 5 \"\"\" Gishape", "odim[2], odim[1]) # indices of gemm never reference padded section", "x = x.squeeze(-1) # pad gemm G = torch.cat([self.pad_gemm(i, x.shape[2],", "k), bias=bias) self.k = k def forward(self, x, mesh): x", "ne add_fac = add_fac.view(b, ne, 1) add_fac = add_fac.repeat(1, 1,", ":, 3] x_2 = f[:, :, :, 2] + f[:,", "Batch x Channels x Edges x 5 \"\"\" Gishape =", "= f.permute(0, 3, 1, 2) # apply the symmetric functions", "flatten_gemm_inds(self, Gi): (b, ne, nn) = Gi.shape ne += 1", "torch.cat([self.pad_gemm(i, x.shape[2], x.device) for i in mesh], 0) # build", "Gi_flat = Gi_flat.view(-1).long() # odim = x.shape x = x.permute(0,", "x, Gi): \"\"\" gathers the edge features (x) with from", "vector then shift all indices. border edges now reference zero", "= x.permute(0, 2, 1).contiguous() x = x.view(odim[0] * odim[2], odim[1])", "x 5 \"\"\" Gishape = Gi.shape # pad the first", "dim=1) # pad using F padded_gemm = F.pad(padded_gemm, (0, 0,", "self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, k), bias=bias) self.k = k", "torch.nn.functional as F class MeshConv(nn.Module): \"\"\" Computes convolution between edges", "1 batch_n = torch.floor(torch.arange(b * ne, device=Gi.device).float() / ne).view(b, ne)", "use 2d convolution on output dimensions: Batch x Channels x", "torch.abs(f[:, :, :, 1] - f[:, :, :, 3]) x_4", "(x) with from the 1-ring indices (Gi) applys symmetric functions", "= padded_gemm.requires_grad_() padded_gemm = torch.cat((torch.arange(int(m.edges_count), device=device).float().unsqueeze(1), padded_gemm), dim=1) # pad", "section of x so padded section never used f =", "2] + f[:, :, :, 4] x_3 = torch.abs(f[:, :,", ":, :, 3] x_2 = f[:, :, :, 2] +", "* odim[2], odim[1]) # indices of gemm never reference padded", "self.create_GeMM(x, G) x = self.conv(G) return x def flatten_gemm_inds(self, Gi):", "invariance returns a 'fake image' which can use 2d convolution", "= torch.abs(f[:, :, :, 1] - f[:, :, :, 3])", "the edge_id itself to make #edges x 5 then pad", "pad to desired size e.g., xsz x 5 \"\"\" padded_gemm", "add the edge_id itself to make #edges x 5 then", "one-ring neighbors (4x) -> m.gemm_edges which is of size #edges", "x.shape[1], 1), requires_grad=True, device=x.device) # add zero feature vector then", "1-ring indices (Gi) applys symmetric functions to handle order invariance", "# build 'neighborhood image' and apply convolution G = self.create_GeMM(x,", "applies convolution \"\"\" def __init__(self, in_channels, out_channels, k=5, bias=True): super(MeshConv,", ":] return Gi def create_GeMM(self, x, Gi): \"\"\" gathers the", "first flatten indices Gi_flat = self.flatten_gemm_inds(Gi) Gi_flat = Gi_flat.view(-1).long() #", "import torch.nn as nn import torch.nn.functional as F class MeshConv(nn.Module):", "to handle order invariance returns a 'fake image' which can", "torch.cat((torch.arange(int(m.edges_count), 
device=device).float().unsqueeze(1), padded_gemm), dim=1) # pad using F padded_gemm =", "f = f.permute(0, 3, 1, 2) # apply the symmetric", "nn import torch.nn.functional as F class MeshConv(nn.Module): \"\"\" Computes convolution", "image' and apply convolution G = self.create_GeMM(x, G) x =", "torch.floor(torch.arange(b * ne, device=Gi.device).float() / ne).view(b, ne) add_fac = batch_n", "never reference padded section of x so padded section never", "x.shape[2], x.device) for i in mesh], 0) # build 'neighborhood", "for an equivariant convolution x_1 = f[:, :, :, 1]", "4 add the edge_id itself to make #edges x 5", "section never used f = torch.index_select(x, dim=0, index=Gi_flat) f =", ":, :, 1] - f[:, :, :, 3]) x_4 =", "x = x.permute(0, 2, 1).contiguous() x = x.view(odim[0] * odim[2],", "xsz x 5 \"\"\" padded_gemm = torch.tensor(m.gemm_edges, device=device).float() padded_gemm =", "for i in mesh], 0) # build 'neighborhood image' and", "ne, device=Gi.device).float() / ne).view(b, ne) add_fac = batch_n * ne", "1:, :] return Gi def create_GeMM(self, x, Gi): \"\"\" gathers", "# pad gemm G = torch.cat([self.pad_gemm(i, x.shape[2], x.device) for i", "'fake image' which can use 2d convolution on output dimensions:", "pad gemm G = torch.cat([self.pad_gemm(i, x.shape[2], x.device) for i in", "padded_gemm), dim=1) # pad using F padded_gemm = F.pad(padded_gemm, (0,", "handle order invariance returns a 'fake image' which can use", "of size #edges x 4 add the edge_id itself to", "odim[1]) # indices of gemm never reference padded section of", "vector x = torch.cat((padding, x), dim=2) Gi = Gi +", "padded_gemm = padded_gemm.requires_grad_() padded_gemm = torch.cat((torch.arange(int(m.edges_count), device=device).float().unsqueeze(1), padded_gemm), dim=1) #", "\"\"\" Gishape = Gi.shape # pad the first row of", "indices Gi_flat = self.flatten_gemm_inds(Gi) Gi_flat = Gi_flat.view(-1).long() # odim =", "bias=True): super(MeshConv, self).__init__() self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, k), bias=bias)", "4] x_3 = torch.abs(f[:, :, :, 1] - f[:, :,", "x_3, x_4], dim=3) return f def pad_gemm(self, m, xsz, device):", "neighbors (4x) -> m.gemm_edges which is of size #edges x", "- f[:, :, :, 4]) f = torch.stack([f[:, :, :,", "kernel_size=(1, k), bias=bias) self.k = k def forward(self, x, mesh):", "f = f.view(Gishape[0], Gishape[1], Gishape[2], -1) f = f.permute(0, 3,", "padded_gemm.requires_grad_() padded_gemm = torch.cat((torch.arange(int(m.edges_count), device=device).float().unsqueeze(1), padded_gemm), dim=1) # pad using", "(0, 0, 0, xsz - m.edges_count), \"constant\", 0) padded_gemm =", "forward pass takes: x: edge features (Batch x Features x", "of x so padded section never used f = torch.index_select(x,", ":, 4]) f = torch.stack([f[:, :, :, 0], x_1, x_2,", "torch.index_select(x, dim=0, index=Gi_flat) f = f.view(Gishape[0], Gishape[1], Gishape[2], -1) f", "in_channels, out_channels, k=5, bias=True): super(MeshConv, self).__init__() self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,", "x.view(odim[0] * odim[2], odim[1]) # indices of gemm never reference", "f[:, :, :, 1] + f[:, :, :, 3] x_2", "Gi = Gi.float() + add_fac[:, 1:, :] return Gi def", "# pad using F padded_gemm = F.pad(padded_gemm, (0, 0, 0,", "indices. 
border edges now reference zero vector x = torch.cat((padding,", "= nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, k), bias=bias) self.k = k def", "gemm G = torch.cat([self.pad_gemm(i, x.shape[2], x.device) for i in mesh],", "- f[:, :, :, 3]) x_4 = torch.abs(f[:, :, :,", "1, nn) # flatten Gi Gi = Gi.float() + add_fac[:,", "0, 0, xsz - m.edges_count), \"constant\", 0) padded_gemm = padded_gemm.unsqueeze(0)", "= torch.cat((torch.arange(int(m.edges_count), device=device).float().unsqueeze(1), padded_gemm), dim=1) # pad using F padded_gemm", "mesh): x = x.squeeze(-1) # pad gemm G = torch.cat([self.pad_gemm(i,", "3]) x_4 = torch.abs(f[:, :, :, 2] - f[:, :,", "odim = x.shape x = x.permute(0, 2, 1).contiguous() x =", "flatten indices Gi_flat = self.flatten_gemm_inds(Gi) Gi_flat = Gi_flat.view(-1).long() # odim", "from the 1-ring indices (Gi) applys symmetric functions to handle", "\"\"\" padded_gemm = torch.tensor(m.gemm_edges, device=device).float() padded_gemm = padded_gemm.requires_grad_() padded_gemm =", "self.flatten_gemm_inds(Gi) Gi_flat = Gi_flat.view(-1).long() # odim = x.shape x =", "5 \"\"\" padded_gemm = torch.tensor(m.gemm_edges, device=device).float() padded_gemm = padded_gemm.requires_grad_() padded_gemm", "Features x Edges) mesh: list of mesh data-structure (len(mesh) ==", "= f[:, :, :, 1] + f[:, :, :, 3]", "0], x_1, x_2, x_3, x_4], dim=3) return f def pad_gemm(self,", "add_fac = add_fac.view(b, ne, 1) add_fac = add_fac.repeat(1, 1, nn)", "+ f[:, :, :, 4] x_3 = torch.abs(f[:, :, :,", "all indices. border edges now reference zero vector x =", "of every sample in batch with zeros padding = torch.zeros((x.shape[0],", "= Gi_flat.view(-1).long() # odim = x.shape x = x.permute(0, 2,", "add_fac = batch_n * ne add_fac = add_fac.view(b, ne, 1)", "f = torch.stack([f[:, :, :, 0], x_1, x_2, x_3, x_4],", "to desired size e.g., xsz x 5 \"\"\" padded_gemm =", "convolution x_1 = f[:, :, :, 1] + f[:, :,", "pass takes: x: edge features (Batch x Features x Edges)", "1).contiguous() x = x.view(odim[0] * odim[2], odim[1]) # indices of", "= self.conv(G) return x def flatten_gemm_inds(self, Gi): (b, ne, nn)", "reference padded section of x so padded section never used", "edge features (x) with from the 1-ring indices (Gi) applys", "which can use 2d convolution on output dimensions: Batch x", "nn) # flatten Gi Gi = Gi.float() + add_fac[:, 1:,", "x_1 = f[:, :, :, 1] + f[:, :, :,", "+ add_fac[:, 1:, :] return Gi def create_GeMM(self, x, Gi):", "= torch.index_select(x, dim=0, index=Gi_flat) f = f.view(Gishape[0], Gishape[1], Gishape[2], -1)", "on output dimensions: Batch x Channels x Edges x 5", "reference zero vector x = torch.cat((padding, x), dim=2) Gi =", "1] + f[:, :, :, 3] x_2 = f[:, :,", "which is of size #edges x 4 add the edge_id", "2d convolution on output dimensions: Batch x Channels x Edges", ":, 1] + f[:, :, :, 3] x_2 = f[:,", "i in mesh], 0) # build 'neighborhood image' and apply", "using F padded_gemm = F.pad(padded_gemm, (0, 0, 0, xsz -", "so padded section never used f = torch.index_select(x, dim=0, index=Gi_flat)", "x_4 = torch.abs(f[:, :, :, 2] - f[:, :, :,", "device=x.device) # add zero feature vector then shift all indices.", "= k def forward(self, x, mesh): x = x.squeeze(-1) #", "f[:, :, :, 2] + f[:, :, :, 4] x_3", "(1-ring) edge neighbors in the forward pass takes: x: edge", "bias=bias) self.k = k def forward(self, x, mesh): x =", "features (Batch x Features x Edges) mesh: list of mesh", "F.pad(padded_gemm, (0, 0, 0, xsz - m.edges_count), \"constant\", 0) 
padded_gemm", "convolution on output dimensions: Batch x Channels x Edges x", "the forward pass takes: x: edge features (Batch x Features", "k def forward(self, x, mesh): x = x.squeeze(-1) # pad", "= Gi.float() + add_fac[:, 1:, :] return Gi def create_GeMM(self,", "returns a 'fake image' which can use 2d convolution on", "make #edges x 5 then pad to desired size e.g.,", "self.k = k def forward(self, x, mesh): x = x.squeeze(-1)", ":, 2] + f[:, :, :, 4] x_3 = torch.abs(f[:,", "x Features x Edges) mesh: list of mesh data-structure (len(mesh)", "G = torch.cat([self.pad_gemm(i, x.shape[2], x.device) for i in mesh], 0)", "xsz - m.edges_count), \"constant\", 0) padded_gemm = padded_gemm.unsqueeze(0) return padded_gemm", "add_fac.view(b, ne, 1) add_fac = add_fac.repeat(1, 1, nn) # flatten", "then pad to desired size e.g., xsz x 5 \"\"\"", "x Edges) mesh: list of mesh data-structure (len(mesh) == Batch)", "x Edges x 5 \"\"\" Gishape = Gi.shape # pad", "3] x_2 = f[:, :, :, 2] + f[:, :,", "batch with zeros padding = torch.zeros((x.shape[0], x.shape[1], 1), requires_grad=True, device=x.device)", "# add zero feature vector then shift all indices. border", "in batch with zeros padding = torch.zeros((x.shape[0], x.shape[1], 1), requires_grad=True,", "sample in batch with zeros padding = torch.zeros((x.shape[0], x.shape[1], 1),", "and applies convolution \"\"\" def __init__(self, in_channels, out_channels, k=5, bias=True):", "symmetric functions to handle order invariance returns a 'fake image'", "padding = torch.zeros((x.shape[0], x.shape[1], 1), requires_grad=True, device=x.device) # add zero", "functions for an equivariant convolution x_1 = f[:, :, :,", "applys symmetric functions to handle order invariance returns a 'fake", "is of size #edges x 4 add the edge_id itself", "incident (1-ring) edge neighbors in the forward pass takes: x:", "size e.g., xsz x 5 \"\"\" padded_gemm = torch.tensor(m.gemm_edges, device=device).float()", "torch.stack([f[:, :, :, 0], x_1, x_2, x_3, x_4], dim=3) return", "= torch.cat([self.pad_gemm(i, x.shape[2], x.device) for i in mesh], 0) #", "#edges x 5 then pad to desired size e.g., xsz", "convolution G = self.create_GeMM(x, G) x = self.conv(G) return x", "image' which can use 2d convolution on output dimensions: Batch", "edge neighbors in the forward pass takes: x: edge features", "def create_GeMM(self, x, Gi): \"\"\" gathers the edge features (x)", "gathers the edge features (x) with from the 1-ring indices", "size #edges x 4 add the edge_id itself to make", "return f def pad_gemm(self, m, xsz, device): \"\"\" extracts one-ring", "zeros padding = torch.zeros((x.shape[0], x.shape[1], 1), requires_grad=True, device=x.device) # add", "ne, nn) = Gi.shape ne += 1 batch_n = torch.floor(torch.arange(b", "= batch_n * ne add_fac = add_fac.view(b, ne, 1) add_fac", "= add_fac.view(b, ne, 1) add_fac = add_fac.repeat(1, 1, nn) #", "f = torch.index_select(x, dim=0, index=Gi_flat) f = f.view(Gishape[0], Gishape[1], Gishape[2],", "first row of every sample in batch with zeros padding", "in mesh], 0) # build 'neighborhood image' and apply convolution", "\"\"\" extracts one-ring neighbors (4x) -> m.gemm_edges which is of", "padded section never used f = torch.index_select(x, dim=0, index=Gi_flat) f", "x), dim=2) Gi = Gi + 1 #shift # first", "# first flatten indices Gi_flat = self.flatten_gemm_inds(Gi) Gi_flat = Gi_flat.view(-1).long()", "shift all indices. 
border edges now reference zero vector x", "edges and 4 incident (1-ring) edge neighbors in the forward", "def pad_gemm(self, m, xsz, device): \"\"\" extracts one-ring neighbors (4x)", "Gi.shape # pad the first row of every sample in", "border edges now reference zero vector x = torch.cat((padding, x),", "batch_n = torch.floor(torch.arange(b * ne, device=Gi.device).float() / ne).view(b, ne) add_fac", "e.g., xsz x 5 \"\"\" padded_gemm = torch.tensor(m.gemm_edges, device=device).float() padded_gemm", "1] - f[:, :, :, 3]) x_4 = torch.abs(f[:, :,", "neighbors in the forward pass takes: x: edge features (Batch", "dim=3) return f def pad_gemm(self, m, xsz, device): \"\"\" extracts", "F padded_gemm = F.pad(padded_gemm, (0, 0, 0, xsz - m.edges_count),", "equivariant convolution x_1 = f[:, :, :, 1] + f[:,", "indices (Gi) applys symmetric functions to handle order invariance returns", "* ne add_fac = add_fac.view(b, ne, 1) add_fac = add_fac.repeat(1,", "= x.view(odim[0] * odim[2], odim[1]) # indices of gemm never", "<filename>models/layers/mesh_conv.py import torch import torch.nn as nn import torch.nn.functional as", "desired size e.g., xsz x 5 \"\"\" padded_gemm = torch.tensor(m.gemm_edges,", "x.device) for i in mesh], 0) # build 'neighborhood image'", "ne).view(b, ne) add_fac = batch_n * ne add_fac = add_fac.view(b,", "x: edge features (Batch x Features x Edges) mesh: list", "1) add_fac = add_fac.repeat(1, 1, nn) # flatten Gi Gi", "# indices of gemm never reference padded section of x", "apply convolution G = self.create_GeMM(x, G) x = self.conv(G) return", "x_4], dim=3) return f def pad_gemm(self, m, xsz, device): \"\"\"", "'neighborhood image' and apply convolution G = self.create_GeMM(x, G) x", "\"\"\" gathers the edge features (x) with from the 1-ring", "Gi.shape ne += 1 batch_n = torch.floor(torch.arange(b * ne, device=Gi.device).float()", "an equivariant convolution x_1 = f[:, :, :, 1] +", ":, 3]) x_4 = torch.abs(f[:, :, :, 2] - f[:,", "then shift all indices. 
border edges now reference zero vector", "= self.create_GeMM(x, G) x = self.conv(G) return x def flatten_gemm_inds(self,", "m, xsz, device): \"\"\" extracts one-ring neighbors (4x) -> m.gemm_edges", "output dimensions: Batch x Channels x Edges x 5 \"\"\"", "super(MeshConv, self).__init__() self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, k), bias=bias) self.k", "0, xsz - m.edges_count), \"constant\", 0) padded_gemm = padded_gemm.unsqueeze(0) return", "# flatten Gi Gi = Gi.float() + add_fac[:, 1:, :]", "the edge features (x) with from the 1-ring indices (Gi)", "the symmetric functions for an equivariant convolution x_1 = f[:,", "5 \"\"\" Gishape = Gi.shape # pad the first row", "now reference zero vector x = torch.cat((padding, x), dim=2) Gi", "mesh: list of mesh data-structure (len(mesh) == Batch) and applies", "4 incident (1-ring) edge neighbors in the forward pass takes:", "padded_gemm = torch.cat((torch.arange(int(m.edges_count), device=device).float().unsqueeze(1), padded_gemm), dim=1) # pad using F", "= Gi + 1 #shift # first flatten indices Gi_flat", "class MeshConv(nn.Module): \"\"\" Computes convolution between edges and 4 incident", "x = self.conv(G) return x def flatten_gemm_inds(self, Gi): (b, ne,", "(Gi) applys symmetric functions to handle order invariance returns a", "(len(mesh) == Batch) and applies convolution \"\"\" def __init__(self, in_channels,", "= torch.zeros((x.shape[0], x.shape[1], 1), requires_grad=True, device=x.device) # add zero feature", "F class MeshConv(nn.Module): \"\"\" Computes convolution between edges and 4", "= self.flatten_gemm_inds(Gi) Gi_flat = Gi_flat.view(-1).long() # odim = x.shape x", "can use 2d convolution on output dimensions: Batch x Channels", "edges now reference zero vector x = torch.cat((padding, x), dim=2)", "x_1, x_2, x_3, x_4], dim=3) return f def pad_gemm(self, m,", ":, :, 2] - f[:, :, :, 4]) f =", "row of every sample in batch with zeros padding =", "Gishape[1], Gishape[2], -1) f = f.permute(0, 3, 1, 2) #", "x 5 \"\"\" padded_gemm = torch.tensor(m.gemm_edges, device=device).float() padded_gemm = padded_gemm.requires_grad_()", "f[:, :, :, 4]) f = torch.stack([f[:, :, :, 0],", "takes: x: edge features (Batch x Features x Edges) mesh:", "#shift # first flatten indices Gi_flat = self.flatten_gemm_inds(Gi) Gi_flat =", "x 5 then pad to desired size e.g., xsz x", "torch.tensor(m.gemm_edges, device=device).float() padded_gemm = padded_gemm.requires_grad_() padded_gemm = torch.cat((torch.arange(int(m.edges_count), device=device).float().unsqueeze(1), padded_gemm),", "device): \"\"\" extracts one-ring neighbors (4x) -> m.gemm_edges which is", "Channels x Edges x 5 \"\"\" Gishape = Gi.shape #", "Computes convolution between edges and 4 incident (1-ring) edge neighbors", "padded_gemm = F.pad(padded_gemm, (0, 0, 0, xsz - m.edges_count), \"constant\",", "out_channels, k=5, bias=True): super(MeshConv, self).__init__() self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1,", ":, :, 3]) x_4 = torch.abs(f[:, :, :, 2] -", "flatten Gi Gi = Gi.float() + add_fac[:, 1:, :] return", "1 #shift # first flatten indices Gi_flat = self.flatten_gemm_inds(Gi) Gi_flat", "nn) = Gi.shape ne += 1 batch_n = torch.floor(torch.arange(b *", "device=device).float().unsqueeze(1), padded_gemm), dim=1) # pad using F padded_gemm = F.pad(padded_gemm,", "nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, k), bias=bias) self.k = k def forward(self,", "Batch) and applies 
convolution \"\"\" def __init__(self, in_channels, out_channels, k=5,", "build 'neighborhood image' and apply convolution G = self.create_GeMM(x, G)", "and apply convolution G = self.create_GeMM(x, G) x = self.conv(G)", "add_fac = add_fac.repeat(1, 1, nn) # flatten Gi Gi =", "= x.squeeze(-1) # pad gemm G = torch.cat([self.pad_gemm(i, x.shape[2], x.device)", "x = x.view(odim[0] * odim[2], odim[1]) # indices of gemm", "Gi.float() + add_fac[:, 1:, :] return Gi def create_GeMM(self, x,", "pad_gemm(self, m, xsz, device): \"\"\" extracts one-ring neighbors (4x) ->", "pad the first row of every sample in batch with", "zero vector x = torch.cat((padding, x), dim=2) Gi = Gi", "= torch.abs(f[:, :, :, 2] - f[:, :, :, 4])" ]
[ "= tables.open_file('development.hdf5', mode='r') # Dimentionality of the data structure. print(fileh.root.utterance_test.shape)", "mode='r') # Dimentionality of the data structure. print(fileh.root.utterance_test.shape) print(fileh.root.utterance_train.shape) print(fileh.root.label_train.shape)", "import numpy as np import matplotlib.pyplot as plt # Reading", "# Dimentionality of the data structure. print(fileh.root.utterance_test.shape) print(fileh.root.utterance_train.shape) print(fileh.root.label_train.shape) print(fileh.root.label_test.shape)", "numpy as np import matplotlib.pyplot as plt # Reading the", "matplotlib.pyplot as plt # Reading the file. fileh = tables.open_file('development.hdf5',", "import matplotlib.pyplot as plt # Reading the file. fileh =", "as np import matplotlib.pyplot as plt # Reading the file.", "np import matplotlib.pyplot as plt # Reading the file. fileh", "plt # Reading the file. fileh = tables.open_file('development.hdf5', mode='r') #", "tables import numpy as np import matplotlib.pyplot as plt #", "# Reading the file. fileh = tables.open_file('development.hdf5', mode='r') # Dimentionality", "fileh = tables.open_file('development.hdf5', mode='r') # Dimentionality of the data structure.", "tables.open_file('development.hdf5', mode='r') # Dimentionality of the data structure. print(fileh.root.utterance_test.shape) print(fileh.root.utterance_train.shape)", "as plt # Reading the file. fileh = tables.open_file('development.hdf5', mode='r')", "Reading the file. fileh = tables.open_file('development.hdf5', mode='r') # Dimentionality of", "the file. fileh = tables.open_file('development.hdf5', mode='r') # Dimentionality of the", "import tables import numpy as np import matplotlib.pyplot as plt", "file. fileh = tables.open_file('development.hdf5', mode='r') # Dimentionality of the data" ]
[ "def wrapper(test_case: unittest.TestCase, *args, **kwargs): loop = asyncio.get_event_loop() task =", "= asyncio.get_event_loop() task = loop.create_task(f(test_case, *args, **kwargs)) try: loop.run_until_complete(task) except", "unittest.TestCase, *args, **kwargs): loop = asyncio.get_event_loop() task = loop.create_task(f(test_case, *args,", "asyncio.get_event_loop() task = loop.create_task(f(test_case, *args, **kwargs)) try: loop.run_until_complete(task) except Exception:", "loop = asyncio.get_event_loop() task = loop.create_task(f(test_case, *args, **kwargs)) try: loop.run_until_complete(task)", "async_test(f): def wrapper(test_case: unittest.TestCase, *args, **kwargs): loop = asyncio.get_event_loop() task", "*args, **kwargs): loop = asyncio.get_event_loop() task = loop.create_task(f(test_case, *args, **kwargs))", "**kwargs): loop = asyncio.get_event_loop() task = loop.create_task(f(test_case, *args, **kwargs)) try:", "task = loop.create_task(f(test_case, *args, **kwargs)) try: loop.run_until_complete(task) except Exception: traceback.print_exc()", "import asyncio import traceback import unittest def async_test(f): def wrapper(test_case:", "asyncio import traceback import unittest def async_test(f): def wrapper(test_case: unittest.TestCase,", "wrapper(test_case: unittest.TestCase, *args, **kwargs): loop = asyncio.get_event_loop() task = loop.create_task(f(test_case,", "<reponame>LinkTsang/qtask-legacy-python import asyncio import traceback import unittest def async_test(f): def", "traceback import unittest def async_test(f): def wrapper(test_case: unittest.TestCase, *args, **kwargs):", "import traceback import unittest def async_test(f): def wrapper(test_case: unittest.TestCase, *args,", "*args, **kwargs)) try: loop.run_until_complete(task) except Exception: traceback.print_exc() raise return wrapper", "unittest def async_test(f): def wrapper(test_case: unittest.TestCase, *args, **kwargs): loop =", "import unittest def async_test(f): def wrapper(test_case: unittest.TestCase, *args, **kwargs): loop", "= loop.create_task(f(test_case, *args, **kwargs)) try: loop.run_until_complete(task) except Exception: traceback.print_exc() raise", "def async_test(f): def wrapper(test_case: unittest.TestCase, *args, **kwargs): loop = asyncio.get_event_loop()", "loop.create_task(f(test_case, *args, **kwargs)) try: loop.run_until_complete(task) except Exception: traceback.print_exc() raise return" ]
[ "request.POST) if newuser_form.is_valid(): newuser_form.save() newuser = auth.authenticate(username = newuser_form.cleaned_data['username'], password", "render(request, 'index.html', {'username': auth.get_user(request).username} ) def logout(request): auth.logout(request) return HttpResponseRedirect(\"/login\")", "args) def reg(request): auth.logout(request) error = '' if request.method ==", "locals() ) def main(request): return render(request, 'index.html', {'username': auth.get_user(request).username} )", "if request.POST: username = request.POST.get('username') password = request.POST.get('password') user =", "auth.logout(request) error = '' if request.method == \"POST\": newuser_form =", "render, render_to_response, redirect from django.contrib import auth from django.contrib.auth.forms import", "неверный пароль\" return render_to_response('login.html', args) else: return render_to_response('login.html', args) def", "<PASSWORD>.cleaned_data['<PASSWORD>']) auth.login(request, newuser) return redirect('/main') else: error = 'Проверьте правильность", "else: args['login_error'] = \"Пользователь не найден или пароль введен неверный", "def main(request): return render(request, 'index.html', {'username': auth.get_user(request).username} ) def logout(request):", "render_to_response('login.html', args) def reg(request): auth.logout(request) error = '' if request.method", "newuser_form.save() newuser = auth.authenticate(username = newuser_form.cleaned_data['username'], password = <PASSWORD>.cleaned_data['<PASSWORD>']) auth.login(request,", "if user is not None: auth.login(request, user) return redirect('/main') else:", "request.POST: username = request.POST.get('username') password = request.POST.get('password') user = auth.authenticate(username=username,", "import auth from django.contrib.auth.forms import UserCreationForm from django.template.context_processors import csrf", "user is not None: auth.login(request, user) return redirect('/main') else: args['login_error']", "return redirect('/main') else: args['login_error'] = \"Пользователь не найден или пароль", ") def main(request): return render(request, 'index.html', {'username': auth.get_user(request).username} ) def", "csrf from django.http import HttpResponseRedirect def login(request): args = {}", "django.http import HttpResponseRedirect def login(request): args = {} args.update(csrf(request)) if", "== \"POST\": newuser_form = UserCreationForm(data = request.POST) if newuser_form.is_valid(): newuser_form.save()", "auth from django.contrib.auth.forms import UserCreationForm from django.template.context_processors import csrf from", "redirect('/main') else: error = 'Проверьте правильность вводимых данных.' 
else: newuser_form", "return render(request, 'reg.html', locals() ) def main(request): return render(request, 'index.html',", "from django.shortcuts import render, render_to_response, redirect from django.contrib import auth", "auth.login(request, newuser) return redirect('/main') else: error = 'Проверьте правильность вводимых", "= {} args.update(csrf(request)) if request.POST: username = request.POST.get('username') password =", "= auth.authenticate(username=username, password=password) if user is not None: auth.login(request, user)", "from django.http import HttpResponseRedirect def login(request): args = {} args.update(csrf(request))", "request.method == \"POST\": newuser_form = UserCreationForm(data = request.POST) if newuser_form.is_valid():", "<reponame>theflatladder/kyrsovaya from django.shortcuts import render, render_to_response, redirect from django.contrib import", "найден или пароль введен неверный пароль\" return render_to_response('login.html', args) else:", "None: auth.login(request, user) return redirect('/main') else: args['login_error'] = \"Пользователь не", "'Проверьте правильность вводимых данных.' else: newuser_form = UserCreationForm() return render(request,", "args['login_error'] = \"Пользователь не найден или пароль введен неверный пароль\"", "request.POST.get('password') user = auth.authenticate(username=username, password=password) if user is not None:", "newuser_form = UserCreationForm(data = request.POST) if newuser_form.is_valid(): newuser_form.save() newuser =", "error = 'Проверьте правильность вводимых данных.' else: newuser_form = UserCreationForm()", "UserCreationForm() return render(request, 'reg.html', locals() ) def main(request): return render(request,", "newuser) return redirect('/main') else: error = 'Проверьте правильность вводимых данных.'", "args.update(csrf(request)) if request.POST: username = request.POST.get('username') password = request.POST.get('password') user", "django.template.context_processors import csrf from django.http import HttpResponseRedirect def login(request): args", "import csrf from django.http import HttpResponseRedirect def login(request): args =", "= UserCreationForm(data = request.POST) if newuser_form.is_valid(): newuser_form.save() newuser = auth.authenticate(username", "else: return render_to_response('login.html', args) def reg(request): auth.logout(request) error = ''", "данных.' else: newuser_form = UserCreationForm() return render(request, 'reg.html', locals() )", "login(request): args = {} args.update(csrf(request)) if request.POST: username = request.POST.get('username')", "def login(request): args = {} args.update(csrf(request)) if request.POST: username =", "'' if request.method == \"POST\": newuser_form = UserCreationForm(data = request.POST)", "return redirect('/main') else: error = 'Проверьте правильность вводимых данных.' 
else:", "или пароль введен неверный пароль\" return render_to_response('login.html', args) else: return", "= <PASSWORD>.cleaned_data['<PASSWORD>']) auth.login(request, newuser) return redirect('/main') else: error = 'Проверьте", "not None: auth.login(request, user) return redirect('/main') else: args['login_error'] = \"Пользователь", "UserCreationForm from django.template.context_processors import csrf from django.http import HttpResponseRedirect def", "UserCreationForm(data = request.POST) if newuser_form.is_valid(): newuser_form.save() newuser = auth.authenticate(username =", "newuser_form.is_valid(): newuser_form.save() newuser = auth.authenticate(username = newuser_form.cleaned_data['username'], password = <PASSWORD>.cleaned_data['<PASSWORD>'])", "else: error = 'Проверьте правильность вводимых данных.' else: newuser_form =", "= \"Пользователь не найден или пароль введен неверный пароль\" return", "= request.POST.get('password') user = auth.authenticate(username=username, password=password) if user is not", "\"Пользователь не найден или пароль введен неверный пароль\" return render_to_response('login.html',", "newuser_form = UserCreationForm() return render(request, 'reg.html', locals() ) def main(request):", "user = auth.authenticate(username=username, password=password) if user is not None: auth.login(request,", "newuser = auth.authenticate(username = newuser_form.cleaned_data['username'], password = <PASSWORD>.cleaned_data['<PASSWORD>']) auth.login(request, newuser)", "= auth.authenticate(username = newuser_form.cleaned_data['username'], password = <PASSWORD>.cleaned_data['<PASSWORD>']) auth.login(request, newuser) return", "= request.POST) if newuser_form.is_valid(): newuser_form.save() newuser = auth.authenticate(username = newuser_form.cleaned_data['username'],", "django.shortcuts import render, render_to_response, redirect from django.contrib import auth from", "import HttpResponseRedirect def login(request): args = {} args.update(csrf(request)) if request.POST:", "password = <PASSWORD>.cleaned_data['<PASSWORD>']) auth.login(request, newuser) return redirect('/main') else: error =", "main(request): return render(request, 'index.html', {'username': auth.get_user(request).username} ) def logout(request): auth.logout(request)", "user) return redirect('/main') else: args['login_error'] = \"Пользователь не найден или", "import render, render_to_response, redirect from django.contrib import auth from django.contrib.auth.forms", "\"POST\": newuser_form = UserCreationForm(data = request.POST) if newuser_form.is_valid(): newuser_form.save() newuser", "'reg.html', locals() ) def main(request): return render(request, 'index.html', {'username': auth.get_user(request).username}", "args) else: return render_to_response('login.html', args) def reg(request): auth.logout(request) error =", "render_to_response('login.html', args) else: return render_to_response('login.html', args) def reg(request): auth.logout(request) error", "= '' if request.method == \"POST\": newuser_form = UserCreationForm(data =", "from django.contrib.auth.forms import UserCreationForm from django.template.context_processors import csrf from django.http", "пароль\" return render_to_response('login.html', args) else: return render_to_response('login.html', args) def reg(request):", "request.POST.get('username') password = request.POST.get('password') user = auth.authenticate(username=username, password=password) if user", "пароль введен неверный пароль\" return render_to_response('login.html', args) else: return 
render_to_response('login.html',", "import UserCreationForm from django.template.context_processors import csrf from django.http import HttpResponseRedirect", "не найден или пароль введен неверный пароль\" return render_to_response('login.html', args)", "def reg(request): auth.logout(request) error = '' if request.method == \"POST\":", "newuser_form.cleaned_data['username'], password = <PASSWORD>.cleaned_data['<PASSWORD>']) auth.login(request, newuser) return redirect('/main') else: error", "правильность вводимых данных.' else: newuser_form = UserCreationForm() return render(request, 'reg.html',", "render_to_response, redirect from django.contrib import auth from django.contrib.auth.forms import UserCreationForm", "django.contrib.auth.forms import UserCreationForm from django.template.context_processors import csrf from django.http import", "вводимых данных.' else: newuser_form = UserCreationForm() return render(request, 'reg.html', locals()", "django.contrib import auth from django.contrib.auth.forms import UserCreationForm from django.template.context_processors import", "auth.authenticate(username=username, password=password) if user is not None: auth.login(request, user) return", "return render_to_response('login.html', args) def reg(request): auth.logout(request) error = '' if", "HttpResponseRedirect def login(request): args = {} args.update(csrf(request)) if request.POST: username", "redirect from django.contrib import auth from django.contrib.auth.forms import UserCreationForm from", "username = request.POST.get('username') password = request.POST.get('password') user = auth.authenticate(username=username, password=password)", "= newuser_form.cleaned_data['username'], password = <PASSWORD>.cleaned_data['<PASSWORD>']) auth.login(request, newuser) return redirect('/main') else:", "= 'Проверьте правильность вводимых данных.' 
else: newuser_form = UserCreationForm() return", "auth.login(request, user) return redirect('/main') else: args['login_error'] = \"Пользователь не найден", "auth.authenticate(username = newuser_form.cleaned_data['username'], password = <PASSWORD>.cleaned_data['<PASSWORD>']) auth.login(request, newuser) return redirect('/main')", "from django.template.context_processors import csrf from django.http import HttpResponseRedirect def login(request):", "else: newuser_form = UserCreationForm() return render(request, 'reg.html', locals() ) def", "password = request.POST.get('password') user = auth.authenticate(username=username, password=password) if user is", "password=password) if user is not None: auth.login(request, user) return redirect('/main')", "введен неверный пароль\" return render_to_response('login.html', args) else: return render_to_response('login.html', args)", "render(request, 'reg.html', locals() ) def main(request): return render(request, 'index.html', {'username':", "from django.contrib import auth from django.contrib.auth.forms import UserCreationForm from django.template.context_processors", "= request.POST.get('username') password = request.POST.get('password') user = auth.authenticate(username=username, password=password) if", "args = {} args.update(csrf(request)) if request.POST: username = request.POST.get('username') password", "reg(request): auth.logout(request) error = '' if request.method == \"POST\": newuser_form", "if request.method == \"POST\": newuser_form = UserCreationForm(data = request.POST) if", "{} args.update(csrf(request)) if request.POST: username = request.POST.get('username') password = request.POST.get('password')", "is not None: auth.login(request, user) return redirect('/main') else: args['login_error'] =", "return render(request, 'index.html', {'username': auth.get_user(request).username} ) def logout(request): auth.logout(request) return", "error = '' if request.method == \"POST\": newuser_form = UserCreationForm(data", "= UserCreationForm() return render(request, 'reg.html', locals() ) def main(request): return", "if newuser_form.is_valid(): newuser_form.save() newuser = auth.authenticate(username = newuser_form.cleaned_data['username'], password =", "return render_to_response('login.html', args) else: return render_to_response('login.html', args) def reg(request): auth.logout(request)", "redirect('/main') else: args['login_error'] = \"Пользователь не найден или пароль введен" ]
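# Hypothetical urls.py for the views above; paths are assumed from the
# redirects to /main and /login in the view code, and the old-style URLconf
# matches the legacy render_to_response API used there.
from django.conf.urls import url
from myapp import views   # 'myapp' is a placeholder app name

urlpatterns = [
    url(r'^login/$', views.login),
    url(r'^reg/$', views.reg),
    url(r'^main/$', views.main),
    url(r'^logout/$', views.logout),
]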
[ "testpklID=int(sys.argv[3]), NetworkType=int(sys.argv[4]), runs=int(sys.argv[5])) else: print(\"argument errors, try\\npython runfile.py <FacePatchID> <trainpklID>", "if len(sys.argv)==6: runPatch(GPU_Device_ID=1, FacePatchID=int(sys.argv[1]), trainpklID=int(sys.argv[2]), testpklID=int(sys.argv[3]), NetworkType=int(sys.argv[4]), runs=int(sys.argv[5])) else: print(\"argument", "trainpklID=int(sys.argv[2]), testpklID=int(sys.argv[3]), NetworkType=int(sys.argv[4]), runs=int(sys.argv[5])) else: print(\"argument errors, try\\npython runfile.py <FacePatchID>", "runs=int(sys.argv[5])) else: print(\"argument errors, try\\npython runfile.py <FacePatchID> <trainpklID> <testpklID> <NetworkType>", "FacePatchID=int(sys.argv[1]), trainpklID=int(sys.argv[2]), testpklID=int(sys.argv[3]), NetworkType=int(sys.argv[4]), runs=int(sys.argv[5])) else: print(\"argument errors, try\\npython runfile.py", "runPatch(GPU_Device_ID=1, FacePatchID=int(sys.argv[1]), trainpklID=int(sys.argv[2]), testpklID=int(sys.argv[3]), NetworkType=int(sys.argv[4]), runs=int(sys.argv[5])) else: print(\"argument errors, try\\npython", "else: print(\"argument errors, try\\npython runfile.py <FacePatchID> <trainpklID> <testpklID> <NetworkType> <runs>\")", "runPatch import sys if len(sys.argv)==6: runPatch(GPU_Device_ID=1, FacePatchID=int(sys.argv[1]), trainpklID=int(sys.argv[2]), testpklID=int(sys.argv[3]), NetworkType=int(sys.argv[4]),", "import runPatch import sys if len(sys.argv)==6: runPatch(GPU_Device_ID=1, FacePatchID=int(sys.argv[1]), trainpklID=int(sys.argv[2]), testpklID=int(sys.argv[3]),", "len(sys.argv)==6: runPatch(GPU_Device_ID=1, FacePatchID=int(sys.argv[1]), trainpklID=int(sys.argv[2]), testpklID=int(sys.argv[3]), NetworkType=int(sys.argv[4]), runs=int(sys.argv[5])) else: print(\"argument errors,", "sys if len(sys.argv)==6: runPatch(GPU_Device_ID=1, FacePatchID=int(sys.argv[1]), trainpklID=int(sys.argv[2]), testpklID=int(sys.argv[3]), NetworkType=int(sys.argv[4]), runs=int(sys.argv[5])) else:", "Facepatchindependenttrain import runPatch import sys if len(sys.argv)==6: runPatch(GPU_Device_ID=1, FacePatchID=int(sys.argv[1]), trainpklID=int(sys.argv[2]),", "<gh_stars>0 from Facepatchindependenttrain import runPatch import sys if len(sys.argv)==6: runPatch(GPU_Device_ID=1,", "import sys if len(sys.argv)==6: runPatch(GPU_Device_ID=1, FacePatchID=int(sys.argv[1]), trainpklID=int(sys.argv[2]), testpklID=int(sys.argv[3]), NetworkType=int(sys.argv[4]), runs=int(sys.argv[5]))", "from Facepatchindependenttrain import runPatch import sys if len(sys.argv)==6: runPatch(GPU_Device_ID=1, FacePatchID=int(sys.argv[1]),", "NetworkType=int(sys.argv[4]), runs=int(sys.argv[5])) else: print(\"argument errors, try\\npython runfile.py <FacePatchID> <trainpklID> <testpklID>" ]
[ "xmltext.index('adminlang') assert xmltext.index('creationtoolversion') assert xmltext.index('datatype') assert xmltext.index('o-tmf') assert xmltext.index('segtype') assert", "posource, sourcelanguage='en', targetlanguage='af'): \"\"\"helper that converts po source to tmx", "the escaping of quotes (and slash)\"\"\" minipo = r'''msgid \"Hello", "fuzzy messages are excluded\"\"\" minipo = r'''#, fuzzy msgid \"One\"", "self.po2tmx(minipo) print \"The generated xml:\" print str(tmx) assert tmx.translate(\"First line\\nSecond", "\"Everyone\"') == 'Good day \"All\"' assert tmx.translate(r'Use \\\".') == r'Gebruik", "kolom\" ''' tmx = self.po2tmx(minipo) print \"The generated xml:\" print", "\"Applications\" msgstr \"Toepassings\" \"\"\" tmx = self.po2tmx(minipo) print \"The generated", "str(tmx) assert tmx.translate(u\"Bézier curve\") == u\"Bézier-kurwe\" class TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX): \"\"\"Tests", "program ABC # msgid \"\" msgstr \"\" \"Project-Id-Version: program 2.1-branch\\n\"", "column\\tSecond column\") == \"Eerste kolom\\tTweede kolom\" def test_escapedquotes(self): \"\"\"Test the", "xmltext.index('segtype') assert xmltext.index('srclang') def test_sourcelanguage(self): minipo = 'msgid \"String\"\\nmsgstr \"String\"\\n'", "\"xh\" def test_targetlanguage(self): minipo = 'msgid \"String\"\\nmsgstr \"String\"\\n' tmx =", "generated xml:\" print str(tmx) assert tmx.translate(\"Applications\") == \"Toepassings\" assert tmx.translate(\"bla\")", "== 'Eerste deel en ekstra' def test_escapednewlines(self): \"\"\"Test the escaping", "\"The generated xml:\" print str(tmx) assert tmx.translate('First part and extra')", "#!/usr/bin/env python # -*- coding: utf-8 -*- from translate.convert import", "source, we want the target tuv assert tuv.get(\"{%s}lang\" % lisa.XML_NS)", "minipo = r'''msgid \"Hello \\\"Everyone\\\"\" msgstr \"Good day \\\"All\\\"\" msgid", "msgstr \"Toepassings\" \"\"\" tmx = self.po2tmx(minipo) print \"The generated xml:\"", "TestPO2TMX): \"\"\"Tests running actual po2tmx commands on files\"\"\" convertmodule =", "wStringIO.StringIO(posource) outputfile = wStringIO.StringIO() outputfile.tmxfile = tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage) po2tmx.convertpo(inputfile, outputfile,", "def test_escapedquotes(self): \"\"\"Test the escaping of quotes (and slash)\"\"\" minipo", "print str(tmx) assert \"<tu\" not in str(tmx) assert len(tmx.units) ==", "'msgid \"String\"\\nmsgstr \"String\"\\n' tmx = self.po2tmx(minipo, sourcelanguage=\"xh\") print \"The generated", "escaping of tabs\"\"\" minipo = r'''msgid \"First column\\tSecond column\" msgstr", "= r'''msgid \"First column\\tSecond column\" msgstr \"Eerste kolom\\tTweede kolom\" '''", "\"Gebruik \\\\\\\".\" ''' tmx = self.po2tmx(minipo) print \"The generated xml:\"", "column\") == \"Eerste kolom\\tTweede kolom\" def test_escapedquotes(self): \"\"\"Test the escaping", "test_multiline(self): \"\"\"Test multiline po entry\"\"\" minipo = r'''msgid \"First part", "= r'''msgid \"First line\\nSecond line\" msgstr \"Eerste lyn\\nTweede lyn\" '''", "\"Content-Transfer-Encoding: 8bit\\n\" # Please remember to do something #: ../dir/file.xml.in.h:1", "ABC # msgid \"\" msgstr \"\" \"Project-Id-Version: program 2.1-branch\\n\" \"Report-Msgid-Bugs-To:", "def test_sourcelanguage(self): minipo = 'msgid \"String\"\\nmsgstr \"String\"\\n' tmx = self.po2tmx(minipo,", "Please remember to do something #: ../dir/file.xml.in.h:1 ../dir/file2.xml.in.h:4 msgid \"Applications\"", "assert 
xmltext.index('o-tmf') assert xmltext.index('segtype') assert xmltext.index('srclang') def test_sourcelanguage(self): minipo =", "= tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage) po2tmx.convertpo(inputfile, outputfile, templatefile=None, sourcelanguage=sourcelanguage, targetlanguage=targetlanguage) return outputfile.tmxfile", "Toolkit - po2tmx\"') assert xmltext.index('adminlang') assert xmltext.index('creationtoolversion') assert xmltext.index('datatype') assert", "messages are excluded\"\"\" minipo = r'''#, fuzzy msgid \"One\" msgstr", "\"MIME-Version: 1.0\\n\" \"Content-Type: text/plain; charset=UTF-8\\n\" \"Content-Transfer-Encoding: 8bit\\n\" # Please remember", "tuv.get(\"{%s}lang\" % lisa.XML_NS) == \"xh\" def test_multiline(self): \"\"\"Test multiline po", "tabs\"\"\" minipo = r'''msgid \"First column\\tSecond column\" msgstr \"Eerste kolom\\tTweede", "from translate.convert import po2tmx from translate.convert import test_convert from translate.misc", "lyn\\nTweede lyn\" ''' tmx = self.po2tmx(minipo) print \"The generated xml:\"", "def test_exclusions(self): \"\"\"Test that empty and fuzzy messages are excluded\"\"\"", "\"PO-Revision-Date: 2004-03-30 17:02+0200\\n\" \"Last-Translator: Zuza Software Foundation <<EMAIL>>\\n\" \"Language-Team: Afrikaans", "translate.misc import wStringIO from translate.storage import tmx from translate.storage import", "r'''msgid \"Bézier curve\" msgstr \"Bézier-kurwe\" ''' tmx = self.po2tmx(minipo) print", "\" \"and extra\" msgstr \"Eerste deel \" \"en ekstra\"''' tmx", "\"\"\"Test that empty and fuzzy messages are excluded\"\"\" minipo =", "assert tmx.translate('First part and extra') == 'Eerste deel en ekstra'", "\"The generated xml:\" print str(tmx) header = tmx.document.find(\"header\") assert header.get(\"srclang\")", "msgstr \"Eerste kolom\\tTweede kolom\" ''' tmx = self.po2tmx(minipo) print \"The", "\"POT-Creation-Date: 2006-01-09 07:15+0100\\n\" \"PO-Revision-Date: 2004-03-30 17:02+0200\\n\" \"Last-Translator: Zuza Software Foundation", "17:02+0200\\n\" \"Last-Translator: Zuza Software Foundation <<EMAIL>>\\n\" \"Language-Team: Afrikaans <<EMAIL>>\\n\" \"MIME-Version:", "\"String\"\\n' tmx = self.po2tmx(minipo, sourcelanguage=\"xh\") print \"The generated xml:\" print", "generated xml:\" print str(tmx) assert tmx.translate(\"First line\\nSecond line\") == \"Eerste", "assert xmltext.index('datatype') assert xmltext.index('o-tmf') assert xmltext.index('segtype') assert xmltext.index('srclang') def test_sourcelanguage(self):", "tmx.translate(\"bla\") is None xmltext = str(tmx) assert xmltext.index('creationtool=\"Translate Toolkit -", "= self.po2tmx(minipo) print \"The generated xml:\" print str(tmx) assert tmx.translate('First", "part and extra') == 'Eerste deel en ekstra' def test_escapednewlines(self):", "== r'Gebruik \\\".' 
def test_exclusions(self): \"\"\"Test that empty and fuzzy", "\"Eerste deel \" \"en ekstra\"''' tmx = self.po2tmx(minipo) print \"The", "not in str(tmx) assert len(tmx.units) == 0 def test_nonascii(self): \"\"\"Tests", "tmx.translate(\"Applications\") == \"Toepassings\" assert tmx.translate(\"bla\") is None xmltext = str(tmx)", "\"\" msgstr \"\" \"Project-Id-Version: program 2.1-branch\\n\" \"Report-Msgid-Bugs-To: \\n\" \"POT-Creation-Date: 2006-01-09", "\"\"\"Tests running actual po2tmx commands on files\"\"\" convertmodule = po2tmx", "str(tmx) assert tmx.translate('First part and extra') == 'Eerste deel en", "line\" msgstr \"Eerste lyn\\nTweede lyn\" ''' tmx = self.po2tmx(minipo) print", "quotes (and slash)\"\"\" minipo = r'''msgid \"Hello \\\"Everyone\\\"\" msgstr \"Good", "xml:\" print str(tmx) assert tmx.translate('First part and extra') == 'Eerste", "want the target tuv assert tuv.get(\"{%s}lang\" % lisa.XML_NS) == \"xh\"", "line\\nSecond line\") == \"Eerste lyn\\nTweede lyn\" def test_escapedtabs(self): \"\"\"Test the", "tmx.document.findall(\".//%s\" % tmx.namespaced(\"tuv\"))[1] #tag[0] will be the source, we want", "escaping of quotes (and slash)\"\"\" minipo = r'''msgid \"Hello \\\"Everyone\\\"\"", "\"\"\"Tests that non-ascii conversion works.\"\"\" minipo = r'''msgid \"Bézier curve\"", "print \"The generated xml:\" print str(tmx) tuv = tmx.document.findall(\".//%s\" %", "of tabs\"\"\" minipo = r'''msgid \"First column\\tSecond column\" msgstr \"Eerste", "lyn\" ''' tmx = self.po2tmx(minipo) print \"The generated xml:\" print", "are excluded\"\"\" minipo = r'''#, fuzzy msgid \"One\" msgstr \"Een\"", "import tmx from translate.storage import lisa class TestPO2TMX: def po2tmx(self,", "# Please remember to do something #: ../dir/file.xml.in.h:1 ../dir/file2.xml.in.h:4 msgid", "running actual po2tmx commands on files\"\"\" convertmodule = po2tmx def", "import wStringIO from translate.storage import tmx from translate.storage import lisa", "\"String\"\\n' tmx = self.po2tmx(minipo, targetlanguage=\"xh\") print \"The generated xml:\" print", "'Eerste deel en ekstra' def test_escapednewlines(self): \"\"\"Test the escaping of", "kolom\\tTweede kolom\" ''' tmx = self.po2tmx(minipo) print \"The generated xml:\"", "== \"xh\" def test_multiline(self): \"\"\"Test multiline po entry\"\"\" minipo =", "assert xmltext.index('creationtoolversion') assert xmltext.index('datatype') assert xmltext.index('o-tmf') assert xmltext.index('segtype') assert xmltext.index('srclang')", "#tag[0] will be the source, we want the target tuv", "of program ABC # msgid \"\" msgstr \"\" \"Project-Id-Version: program", "that non-ascii conversion works.\"\"\" minipo = r'''msgid \"Bézier curve\" msgstr", "generated xml:\" print str(tmx) assert \"<tu\" not in str(tmx) assert", "do something #: ../dir/file.xml.in.h:1 ../dir/file2.xml.in.h:4 msgid \"Applications\" msgstr \"Toepassings\" \"\"\"", "outputfile.tmxfile def test_basic(self): minipo = r\"\"\"# Afrikaans translation of program", "= tmx.document.findall(\".//%s\" % tmx.namespaced(\"tuv\"))[1] #tag[0] will be the source, we", "= test_convert.TestConvertCommand.test_help(self) options = self.help_check(options, \"-l LANG, --language=LANG\") options =", "print \"The generated xml:\" print str(tmx) assert tmx.translate('First part and", "test_escapedtabs(self): \"\"\"Test the escaping of tabs\"\"\" minipo = r'''msgid \"First", "\\\"Everyone\\\"\" msgstr \"Good day \\\"All\\\"\" msgid \"Use \\\\\\\".\" msgstr \"Gebruik", "test_convert from translate.misc import 
wStringIO from translate.storage import tmx from", "print str(tmx) assert tmx.translate('Hello \"Everyone\"') == 'Good day \"All\"' assert", "minipo = r'''msgid \"First line\\nSecond line\" msgstr \"Eerste lyn\\nTweede lyn\"", "== \"xh\" def test_targetlanguage(self): minipo = 'msgid \"String\"\\nmsgstr \"String\"\\n' tmx", "u\"Bézier-kurwe\" class TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX): \"\"\"Tests running actual po2tmx commands on", "= r'''msgid \"First part \" \"and extra\" msgstr \"Eerste deel", "xml:\" print str(tmx) assert tmx.translate('Hello \"Everyone\"') == 'Good day \"All\"'", "= 'msgid \"String\"\\nmsgstr \"String\"\\n' tmx = self.po2tmx(minipo, targetlanguage=\"xh\") print \"The", "return outputfile.tmxfile def test_basic(self): minipo = r\"\"\"# Afrikaans translation of", "def test_escapedtabs(self): \"\"\"Test the escaping of tabs\"\"\" minipo = r'''msgid", "convertmodule = po2tmx def test_help(self): \"\"\"tests getting help\"\"\" options =", "lisa class TestPO2TMX: def po2tmx(self, posource, sourcelanguage='en', targetlanguage='af'): \"\"\"helper that", "assert header.get(\"srclang\") == \"xh\" def test_targetlanguage(self): minipo = 'msgid \"String\"\\nmsgstr", "<<EMAIL>>\\n\" \"MIME-Version: 1.0\\n\" \"Content-Type: text/plain; charset=UTF-8\\n\" \"Content-Transfer-Encoding: 8bit\\n\" # Please", "07:15+0100\\n\" \"PO-Revision-Date: 2004-03-30 17:02+0200\\n\" \"Last-Translator: Zuza Software Foundation <<EMAIL>>\\n\" \"Language-Team:", "lyn\\nTweede lyn\" def test_escapedtabs(self): \"\"\"Test the escaping of tabs\"\"\" minipo", "print \"The generated xml:\" print str(tmx) assert \"<tu\" not in", "column\\tSecond column\" msgstr \"Eerste kolom\\tTweede kolom\" ''' tmx = self.po2tmx(minipo)", "== 0 def test_nonascii(self): \"\"\"Tests that non-ascii conversion works.\"\"\" minipo", "from translate.storage import tmx from translate.storage import lisa class TestPO2TMX:", "str(tmx) assert \"<tu\" not in str(tmx) assert len(tmx.units) == 0", "\"Eerste lyn\\nTweede lyn\" ''' tmx = self.po2tmx(minipo) print \"The generated", "generated xml:\" print str(tmx) assert tmx.translate('Hello \"Everyone\"') == 'Good day", "= r\"\"\"# Afrikaans translation of program ABC # msgid \"\"", "= 'msgid \"String\"\\nmsgstr \"String\"\\n' tmx = self.po2tmx(minipo, sourcelanguage=\"xh\") print \"The", "print str(tmx) tuv = tmx.document.findall(\".//%s\" % tmx.namespaced(\"tuv\"))[1] #tag[0] will be", "print str(tmx) assert tmx.translate('First part and extra') == 'Eerste deel", "of quotes (and slash)\"\"\" minipo = r'''msgid \"Hello \\\"Everyone\\\"\" msgstr", "TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX): \"\"\"Tests running actual po2tmx commands on files\"\"\" convertmodule", "<<EMAIL>>\\n\" \"Language-Team: Afrikaans <<EMAIL>>\\n\" \"MIME-Version: 1.0\\n\" \"Content-Type: text/plain; charset=UTF-8\\n\" \"Content-Transfer-Encoding:", "\"Two\" msgstr \"\" msgid \"\" msgstr \"Drie\" ''' tmx =", "xmltext.index('datatype') assert xmltext.index('o-tmf') assert xmltext.index('segtype') assert xmltext.index('srclang') def test_sourcelanguage(self): minipo", "po entry\"\"\" minipo = r'''msgid \"First part \" \"and extra\"", "\\\\\\\".\" msgstr \"Gebruik \\\\\\\".\" ''' tmx = self.po2tmx(minipo) print \"The", "msgstr \"Eerste lyn\\nTweede lyn\" ''' tmx = self.po2tmx(minipo) print \"The", "tmx.namespaced(\"tuv\"))[1] #tag[0] will be the source, we want the target", "multiline po entry\"\"\" minipo = r'''msgid \"First part \" \"and", "''' tmx = 
self.po2tmx(minipo) print str(tmx) assert tmx.translate(u\"Bézier curve\") ==", "def test_help(self): \"\"\"tests getting help\"\"\" options = test_convert.TestConvertCommand.test_help(self) options =", "2004-03-30 17:02+0200\\n\" \"Last-Translator: Zuza Software Foundation <<EMAIL>>\\n\" \"Language-Team: Afrikaans <<EMAIL>>\\n\"", "== \"Eerste kolom\\tTweede kolom\" def test_escapedquotes(self): \"\"\"Test the escaping of", "text/plain; charset=UTF-8\\n\" \"Content-Transfer-Encoding: 8bit\\n\" # Please remember to do something", "tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage) po2tmx.convertpo(inputfile, outputfile, templatefile=None, sourcelanguage=sourcelanguage, targetlanguage=targetlanguage) return outputfile.tmxfile def", "non-ascii conversion works.\"\"\" minipo = r'''msgid \"Bézier curve\" msgstr \"Bézier-kurwe\"", "% tmx.namespaced(\"tuv\"))[1] #tag[0] will be the source, we want the", "\"The generated xml:\" print str(tmx) assert tmx.translate(\"First column\\tSecond column\") ==", "generated xml:\" print str(tmx) tuv = tmx.document.findall(\".//%s\" % tmx.namespaced(\"tuv\"))[1] #tag[0]", "Zuza Software Foundation <<EMAIL>>\\n\" \"Language-Team: Afrikaans <<EMAIL>>\\n\" \"MIME-Version: 1.0\\n\" \"Content-Type:", "deel en ekstra' def test_escapednewlines(self): \"\"\"Test the escaping of newlines\"\"\"", "tmx = self.po2tmx(minipo) print \"The generated xml:\" print str(tmx) assert", "TestPO2TMX: def po2tmx(self, posource, sourcelanguage='en', targetlanguage='af'): \"\"\"helper that converts po", "\"Bézier curve\" msgstr \"Bézier-kurwe\" ''' tmx = self.po2tmx(minipo) print str(tmx)", "entry\"\"\" minipo = r'''msgid \"First part \" \"and extra\" msgstr", "\\n\" \"POT-Creation-Date: 2006-01-09 07:15+0100\\n\" \"PO-Revision-Date: 2004-03-30 17:02+0200\\n\" \"Last-Translator: Zuza Software", "of newlines\"\"\" minipo = r'''msgid \"First line\\nSecond line\" msgstr \"Eerste", "po2tmx def test_help(self): \"\"\"tests getting help\"\"\" options = test_convert.TestConvertCommand.test_help(self) options", "\"The generated xml:\" print str(tmx) tuv = tmx.document.findall(\".//%s\" % tmx.namespaced(\"tuv\"))[1]", "msgstr \"Eerste deel \" \"en ekstra\"''' tmx = self.po2tmx(minipo) print", "minipo = r'''msgid \"Bézier curve\" msgstr \"Bézier-kurwe\" ''' tmx =", "xml:\" print str(tmx) assert tmx.translate(\"First line\\nSecond line\") == \"Eerste lyn\\nTweede", "\"\"\"Test the escaping of newlines\"\"\" minipo = r'''msgid \"First line\\nSecond", "outputfile, templatefile=None, sourcelanguage=sourcelanguage, targetlanguage=targetlanguage) return outputfile.tmxfile def test_basic(self): minipo =", "ekstra' def test_escapednewlines(self): \"\"\"Test the escaping of newlines\"\"\" minipo =", "\"\"\"helper that converts po source to tmx source without requiring", "\"The generated xml:\" print str(tmx) assert tmx.translate(\"First line\\nSecond line\") ==", "\\\".') == r'Gebruik \\\".' def test_exclusions(self): \"\"\"Test that empty and", "help\"\"\" options = test_convert.TestConvertCommand.test_help(self) options = self.help_check(options, \"-l LANG, --language=LANG\")", "be the source, we want the target tuv assert tuv.get(\"{%s}lang\"", "\"First column\\tSecond column\" msgstr \"Eerste kolom\\tTweede kolom\" ''' tmx =", "msgid \"Two\" msgstr \"\" msgid \"\" msgstr \"Drie\" ''' tmx", "day \"All\"' assert tmx.translate(r'Use \\\".') == r'Gebruik \\\".' 
def test_exclusions(self):", "target tuv assert tuv.get(\"{%s}lang\" % lisa.XML_NS) == \"xh\" def test_multiline(self):", "print str(tmx) assert tmx.translate(\"Applications\") == \"Toepassings\" assert tmx.translate(\"bla\") is None", "\"\" msgstr \"Drie\" ''' tmx = self.po2tmx(minipo) print \"The generated", "templatefile=None, sourcelanguage=sourcelanguage, targetlanguage=targetlanguage) return outputfile.tmxfile def test_basic(self): minipo = r\"\"\"#", "tmx = self.po2tmx(minipo) print str(tmx) assert tmx.translate(u\"Bézier curve\") == u\"Bézier-kurwe\"", "Software Foundation <<EMAIL>>\\n\" \"Language-Team: Afrikaans <<EMAIL>>\\n\" \"MIME-Version: 1.0\\n\" \"Content-Type: text/plain;", "msgid \"Use \\\\\\\".\" msgstr \"Gebruik \\\\\\\".\" ''' tmx = self.po2tmx(minipo)", "newlines\"\"\" minipo = r'''msgid \"First line\\nSecond line\" msgstr \"Eerste lyn\\nTweede", "extra\" msgstr \"Eerste deel \" \"en ekstra\"''' tmx = self.po2tmx(minipo)", "\\\"All\\\"\" msgid \"Use \\\\\\\".\" msgstr \"Gebruik \\\\\\\".\" ''' tmx =", "getting help\"\"\" options = test_convert.TestConvertCommand.test_help(self) options = self.help_check(options, \"-l LANG,", "msgid \"One\" msgstr \"Een\" msgid \"Two\" msgstr \"\" msgid \"\"", "wStringIO.StringIO() outputfile.tmxfile = tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage) po2tmx.convertpo(inputfile, outputfile, templatefile=None, sourcelanguage=sourcelanguage, targetlanguage=targetlanguage)", "ekstra\"''' tmx = self.po2tmx(minipo) print \"The generated xml:\" print str(tmx)", "\"and extra\" msgstr \"Eerste deel \" \"en ekstra\"''' tmx =", "import lisa class TestPO2TMX: def po2tmx(self, posource, sourcelanguage='en', targetlanguage='af'): \"\"\"helper", "\"\"\"tests getting help\"\"\" options = test_convert.TestConvertCommand.test_help(self) options = self.help_check(options, \"-l", "\"Een\" msgid \"Two\" msgstr \"\" msgid \"\" msgstr \"Drie\" '''", "\"Content-Type: text/plain; charset=UTF-8\\n\" \"Content-Transfer-Encoding: 8bit\\n\" # Please remember to do", "str(tmx) assert tmx.translate(\"Applications\") == \"Toepassings\" assert tmx.translate(\"bla\") is None xmltext", "translate.storage import tmx from translate.storage import lisa class TestPO2TMX: def", "2006-01-09 07:15+0100\\n\" \"PO-Revision-Date: 2004-03-30 17:02+0200\\n\" \"Last-Translator: Zuza Software Foundation <<EMAIL>>\\n\"", "tmx.translate(\"First line\\nSecond line\") == \"Eerste lyn\\nTweede lyn\" def test_escapedtabs(self): \"\"\"Test", "len(tmx.units) == 0 def test_nonascii(self): \"\"\"Tests that non-ascii conversion works.\"\"\"", "tuv assert tuv.get(\"{%s}lang\" % lisa.XML_NS) == \"xh\" def test_multiline(self): \"\"\"Test", "sourcelanguage=sourcelanguage, targetlanguage=targetlanguage) return outputfile.tmxfile def test_basic(self): minipo = r\"\"\"# Afrikaans", "tmx = self.po2tmx(minipo, targetlanguage=\"xh\") print \"The generated xml:\" print str(tmx)", "= r'''msgid \"Bézier curve\" msgstr \"Bézier-kurwe\" ''' tmx = self.po2tmx(minipo)", "'msgid \"String\"\\nmsgstr \"String\"\\n' tmx = self.po2tmx(minipo, targetlanguage=\"xh\") print \"The generated", "assert tuv.get(\"{%s}lang\" % lisa.XML_NS) == \"xh\" def test_multiline(self): \"\"\"Test multiline", "po2tmx from translate.convert import test_convert from translate.misc import wStringIO from", "the target tuv assert tuv.get(\"{%s}lang\" % lisa.XML_NS) == \"xh\" def", "== \"Toepassings\" assert tmx.translate(\"bla\") is None xmltext = str(tmx) assert", "tmx.translate(r'Use \\\".') == r'Gebruik \\\".' 
def test_exclusions(self): \"\"\"Test that empty", "<gh_stars>1-10 #!/usr/bin/env python # -*- coding: utf-8 -*- from translate.convert", "print \"The generated xml:\" print str(tmx) assert tmx.translate(\"First column\\tSecond column\")", "print \"The generated xml:\" print str(tmx) assert tmx.translate(\"Applications\") == \"Toepassings\"", "empty and fuzzy messages are excluded\"\"\" minipo = r'''#, fuzzy", "xmltext.index('o-tmf') assert xmltext.index('segtype') assert xmltext.index('srclang') def test_sourcelanguage(self): minipo = 'msgid", "from translate.convert import test_convert from translate.misc import wStringIO from translate.storage", "and fuzzy messages are excluded\"\"\" minipo = r'''#, fuzzy msgid", "po2tmx commands on files\"\"\" convertmodule = po2tmx def test_help(self): \"\"\"tests", "\"Eerste lyn\\nTweede lyn\" def test_escapedtabs(self): \"\"\"Test the escaping of tabs\"\"\"", "day \\\"All\\\"\" msgid \"Use \\\\\\\".\" msgstr \"Gebruik \\\\\\\".\" ''' tmx", "test_escapednewlines(self): \"\"\"Test the escaping of newlines\"\"\" minipo = r'''msgid \"First", "\"Hello \\\"Everyone\\\"\" msgstr \"Good day \\\"All\\\"\" msgid \"Use \\\\\\\".\" msgstr", "deel \" \"en ekstra\"''' tmx = self.po2tmx(minipo) print \"The generated", "\"\"\"Test the escaping of tabs\"\"\" minipo = r'''msgid \"First column\\tSecond", "msgstr \"\" msgid \"\" msgstr \"Drie\" ''' tmx = self.po2tmx(minipo)", "generated xml:\" print str(tmx) assert tmx.translate('First part and extra') ==", "slash)\"\"\" minipo = r'''msgid \"Hello \\\"Everyone\\\"\" msgstr \"Good day \\\"All\\\"\"", "= self.po2tmx(minipo) print \"The generated xml:\" print str(tmx) assert tmx.translate(\"First", "0 def test_nonascii(self): \"\"\"Tests that non-ascii conversion works.\"\"\" minipo =", "\"Use \\\\\\\".\" msgstr \"Gebruik \\\\\\\".\" ''' tmx = self.po2tmx(minipo) print", "msgstr \"Een\" msgid \"Two\" msgstr \"\" msgid \"\" msgstr \"Drie\"", "self.po2tmx(minipo) print \"The generated xml:\" print str(tmx) assert tmx.translate(\"First column\\tSecond", "line\") == \"Eerste lyn\\nTweede lyn\" def test_escapedtabs(self): \"\"\"Test the escaping", "po2tmx.convertpo(inputfile, outputfile, templatefile=None, sourcelanguage=sourcelanguage, targetlanguage=targetlanguage) return outputfile.tmxfile def test_basic(self): minipo", "assert tmx.translate(\"First line\\nSecond line\") == \"Eerste lyn\\nTweede lyn\" def test_escapedtabs(self):", "self.po2tmx(minipo) print str(tmx) assert tmx.translate(u\"Bézier curve\") == u\"Bézier-kurwe\" class TestPO2TMXCommand(test_convert.TestConvertCommand,", "\"All\"' assert tmx.translate(r'Use \\\".') == r'Gebruik \\\".' 
def test_exclusions(self): \"\"\"Test", "= str(tmx) assert xmltext.index('creationtool=\"Translate Toolkit - po2tmx\"') assert xmltext.index('adminlang') assert", "= r'''msgid \"Hello \\\"Everyone\\\"\" msgstr \"Good day \\\"All\\\"\" msgid \"Use", "we want the target tuv assert tuv.get(\"{%s}lang\" % lisa.XML_NS) ==", "and extra') == 'Eerste deel en ekstra' def test_escapednewlines(self): \"\"\"Test", "str(tmx) header = tmx.document.find(\"header\") assert header.get(\"srclang\") == \"xh\" def test_targetlanguage(self):", "test_sourcelanguage(self): minipo = 'msgid \"String\"\\nmsgstr \"String\"\\n' tmx = self.po2tmx(minipo, sourcelanguage=\"xh\")", "tmx.translate('First part and extra') == 'Eerste deel en ekstra' def", "def test_targetlanguage(self): minipo = 'msgid \"String\"\\nmsgstr \"String\"\\n' tmx = self.po2tmx(minipo,", "generated xml:\" print str(tmx) header = tmx.document.find(\"header\") assert header.get(\"srclang\") ==", "= self.po2tmx(minipo) print \"The generated xml:\" print str(tmx) assert tmx.translate('Hello", "test_exclusions(self): \"\"\"Test that empty and fuzzy messages are excluded\"\"\" minipo", "kolom\" def test_escapedquotes(self): \"\"\"Test the escaping of quotes (and slash)\"\"\"", "escaping of newlines\"\"\" minipo = r'''msgid \"First line\\nSecond line\" msgstr", "commands on files\"\"\" convertmodule = po2tmx def test_help(self): \"\"\"tests getting", "minipo = 'msgid \"String\"\\nmsgstr \"String\"\\n' tmx = self.po2tmx(minipo, sourcelanguage=\"xh\") print", "assert tmx.translate(\"Applications\") == \"Toepassings\" assert tmx.translate(\"bla\") is None xmltext =", "curve\") == u\"Bézier-kurwe\" class TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX): \"\"\"Tests running actual po2tmx", "\"Language-Team: Afrikaans <<EMAIL>>\\n\" \"MIME-Version: 1.0\\n\" \"Content-Type: text/plain; charset=UTF-8\\n\" \"Content-Transfer-Encoding: 8bit\\n\"", "test_basic(self): minipo = r\"\"\"# Afrikaans translation of program ABC #", "class TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX): \"\"\"Tests running actual po2tmx commands on files\"\"\"", "in str(tmx) assert len(tmx.units) == 0 def test_nonascii(self): \"\"\"Tests that", "\"\"\"Test multiline po entry\"\"\" minipo = r'''msgid \"First part \"", "minipo = r'''msgid \"First column\\tSecond column\" msgstr \"Eerste kolom\\tTweede kolom\"", "== u\"Bézier-kurwe\" class TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX): \"\"\"Tests running actual po2tmx commands", "assert \"<tu\" not in str(tmx) assert len(tmx.units) == 0 def", "str(tmx) tuv = tmx.document.findall(\".//%s\" % tmx.namespaced(\"tuv\"))[1] #tag[0] will be the", "class TestPO2TMX: def po2tmx(self, posource, sourcelanguage='en', targetlanguage='af'): \"\"\"helper that converts", "\"Eerste kolom\\tTweede kolom\" ''' tmx = self.po2tmx(minipo) print \"The generated", "assert tmx.translate('Hello \"Everyone\"') == 'Good day \"All\"' assert tmx.translate(r'Use \\\".')", "test_escapedquotes(self): \"\"\"Test the escaping of quotes (and slash)\"\"\" minipo =", "tuv = tmx.document.findall(\".//%s\" % tmx.namespaced(\"tuv\"))[1] #tag[0] will be the source,", "options = test_convert.TestConvertCommand.test_help(self) options = self.help_check(options, \"-l LANG, --language=LANG\") options", "assert xmltext.index('adminlang') assert xmltext.index('creationtoolversion') assert xmltext.index('datatype') assert xmltext.index('o-tmf') assert xmltext.index('segtype')", "minipo = r\"\"\"# Afrikaans translation of program ABC # msgid", 
"assert tmx.translate(u\"Bézier curve\") == u\"Bézier-kurwe\" class TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX): \"\"\"Tests running", "tmx.translate(\"First column\\tSecond column\") == \"Eerste kolom\\tTweede kolom\" def test_escapedquotes(self): \"\"\"Test", "assert xmltext.index('creationtool=\"Translate Toolkit - po2tmx\"') assert xmltext.index('adminlang') assert xmltext.index('creationtoolversion') assert", "xml:\" print str(tmx) tuv = tmx.document.findall(\".//%s\" % tmx.namespaced(\"tuv\"))[1] #tag[0] will", "minipo = 'msgid \"String\"\\nmsgstr \"String\"\\n' tmx = self.po2tmx(minipo, targetlanguage=\"xh\") print", "tmx.document.find(\"header\") assert header.get(\"srclang\") == \"xh\" def test_targetlanguage(self): minipo = 'msgid", "= po2tmx def test_help(self): \"\"\"tests getting help\"\"\" options = test_convert.TestConvertCommand.test_help(self)", "from translate.storage import lisa class TestPO2TMX: def po2tmx(self, posource, sourcelanguage='en',", "(and slash)\"\"\" minipo = r'''msgid \"Hello \\\"Everyone\\\"\" msgstr \"Good day", "\"\"\" tmx = self.po2tmx(minipo) print \"The generated xml:\" print str(tmx)", "'Good day \"All\"' assert tmx.translate(r'Use \\\".') == r'Gebruik \\\".' def", "str(tmx) assert tmx.translate(\"First line\\nSecond line\") == \"Eerste lyn\\nTweede lyn\" def", "= self.po2tmx(minipo) print \"The generated xml:\" print str(tmx) assert \"<tu\"", "xmltext.index('creationtoolversion') assert xmltext.index('datatype') assert xmltext.index('o-tmf') assert xmltext.index('segtype') assert xmltext.index('srclang') def", "= self.po2tmx(minipo) print \"The generated xml:\" print str(tmx) assert tmx.translate(\"Applications\")", "conversion works.\"\"\" minipo = r'''msgid \"Bézier curve\" msgstr \"Bézier-kurwe\" '''", "remember to do something #: ../dir/file.xml.in.h:1 ../dir/file2.xml.in.h:4 msgid \"Applications\" msgstr", "= self.po2tmx(minipo, targetlanguage=\"xh\") print \"The generated xml:\" print str(tmx) tuv", "outputfile = wStringIO.StringIO() outputfile.tmxfile = tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage) po2tmx.convertpo(inputfile, outputfile, templatefile=None,", "xmltext.index('creationtool=\"Translate Toolkit - po2tmx\"') assert xmltext.index('adminlang') assert xmltext.index('creationtoolversion') assert xmltext.index('datatype')", "tmx.translate(u\"Bézier curve\") == u\"Bézier-kurwe\" class TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX): \"\"\"Tests running actual", "kolom\\tTweede kolom\" def test_escapedquotes(self): \"\"\"Test the escaping of quotes (and", "r'''msgid \"First part \" \"and extra\" msgstr \"Eerste deel \"", "coding: utf-8 -*- from translate.convert import po2tmx from translate.convert import", "../dir/file2.xml.in.h:4 msgid \"Applications\" msgstr \"Toepassings\" \"\"\" tmx = self.po2tmx(minipo) print", "actual po2tmx commands on files\"\"\" convertmodule = po2tmx def test_help(self):", "= wStringIO.StringIO() outputfile.tmxfile = tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage) po2tmx.convertpo(inputfile, outputfile, templatefile=None, sourcelanguage=sourcelanguage,", "assert tmx.translate(\"bla\") is None xmltext = str(tmx) assert xmltext.index('creationtool=\"Translate Toolkit", "generated xml:\" print str(tmx) assert tmx.translate(\"First column\\tSecond column\") == \"Eerste", "print str(tmx) assert tmx.translate(\"First line\\nSecond line\") == \"Eerste lyn\\nTweede lyn\"", "def test_nonascii(self): \"\"\"Tests that non-ascii conversion works.\"\"\" minipo = 
r'''msgid", "that empty and fuzzy messages are excluded\"\"\" minipo = r'''#,", "\"String\"\\nmsgstr \"String\"\\n' tmx = self.po2tmx(minipo, sourcelanguage=\"xh\") print \"The generated xml:\"", "Afrikaans <<EMAIL>>\\n\" \"MIME-Version: 1.0\\n\" \"Content-Type: text/plain; charset=UTF-8\\n\" \"Content-Transfer-Encoding: 8bit\\n\" #", "\"\" \"Project-Id-Version: program 2.1-branch\\n\" \"Report-Msgid-Bugs-To: \\n\" \"POT-Creation-Date: 2006-01-09 07:15+0100\\n\" \"PO-Revision-Date:", "the escaping of newlines\"\"\" minipo = r'''msgid \"First line\\nSecond line\"", "po2tmx\"') assert xmltext.index('adminlang') assert xmltext.index('creationtoolversion') assert xmltext.index('datatype') assert xmltext.index('o-tmf') assert", "= self.po2tmx(minipo) print str(tmx) assert tmx.translate(u\"Bézier curve\") == u\"Bézier-kurwe\" class", "str(tmx) assert tmx.translate(\"First column\\tSecond column\") == \"Eerste kolom\\tTweede kolom\" def", "self.po2tmx(minipo, targetlanguage=\"xh\") print \"The generated xml:\" print str(tmx) tuv =", "xml:\" print str(tmx) assert tmx.translate(\"First column\\tSecond column\") == \"Eerste kolom\\tTweede", "\"Bézier-kurwe\" ''' tmx = self.po2tmx(minipo) print str(tmx) assert tmx.translate(u\"Bézier curve\")", "2.1-branch\\n\" \"Report-Msgid-Bugs-To: \\n\" \"POT-Creation-Date: 2006-01-09 07:15+0100\\n\" \"PO-Revision-Date: 2004-03-30 17:02+0200\\n\" \"Last-Translator:", "test_nonascii(self): \"\"\"Tests that non-ascii conversion works.\"\"\" minipo = r'''msgid \"Bézier", "\"<tu\" not in str(tmx) assert len(tmx.units) == 0 def test_nonascii(self):", "msgstr \"\" \"Project-Id-Version: program 2.1-branch\\n\" \"Report-Msgid-Bugs-To: \\n\" \"POT-Creation-Date: 2006-01-09 07:15+0100\\n\"", "tmx from translate.storage import lisa class TestPO2TMX: def po2tmx(self, posource,", "tmx source without requiring files\"\"\" inputfile = wStringIO.StringIO(posource) outputfile =", "\\\".' 
def test_exclusions(self): \"\"\"Test that empty and fuzzy messages are", "\"First line\\nSecond line\" msgstr \"Eerste lyn\\nTweede lyn\" ''' tmx =", "\"One\" msgstr \"Een\" msgid \"Two\" msgstr \"\" msgid \"\" msgstr", "../dir/file.xml.in.h:1 ../dir/file2.xml.in.h:4 msgid \"Applications\" msgstr \"Toepassings\" \"\"\" tmx = self.po2tmx(minipo)", "header = tmx.document.find(\"header\") assert header.get(\"srclang\") == \"xh\" def test_targetlanguage(self): minipo", "files\"\"\" convertmodule = po2tmx def test_help(self): \"\"\"tests getting help\"\"\" options", "test_convert.TestConvertCommand.test_help(self) options = self.help_check(options, \"-l LANG, --language=LANG\") options = self.help_check(options,", "xml:\" print str(tmx) assert \"<tu\" not in str(tmx) assert len(tmx.units)", "r'''#, fuzzy msgid \"One\" msgstr \"Een\" msgid \"Two\" msgstr \"\"", "curve\" msgstr \"Bézier-kurwe\" ''' tmx = self.po2tmx(minipo) print str(tmx) assert", "msgstr \"Drie\" ''' tmx = self.po2tmx(minipo) print \"The generated xml:\"", "that converts po source to tmx source without requiring files\"\"\"", "self.po2tmx(minipo) print \"The generated xml:\" print str(tmx) assert tmx.translate('First part", "targetlanguage=targetlanguage) return outputfile.tmxfile def test_basic(self): minipo = r\"\"\"# Afrikaans translation", "import test_convert from translate.misc import wStringIO from translate.storage import tmx", "= r'''#, fuzzy msgid \"One\" msgstr \"Een\" msgid \"Two\" msgstr", "def test_escapednewlines(self): \"\"\"Test the escaping of newlines\"\"\" minipo = r'''msgid", "import po2tmx from translate.convert import test_convert from translate.misc import wStringIO", "print str(tmx) header = tmx.document.find(\"header\") assert header.get(\"srclang\") == \"xh\" def", "header.get(\"srclang\") == \"xh\" def test_targetlanguage(self): minipo = 'msgid \"String\"\\nmsgstr \"String\"\\n'", "\"\" msgid \"\" msgstr \"Drie\" ''' tmx = self.po2tmx(minipo) print", "assert tmx.translate(\"First column\\tSecond column\") == \"Eerste kolom\\tTweede kolom\" def test_escapedquotes(self):", "8bit\\n\" # Please remember to do something #: ../dir/file.xml.in.h:1 ../dir/file2.xml.in.h:4", "utf-8 -*- from translate.convert import po2tmx from translate.convert import test_convert", "po source to tmx source without requiring files\"\"\" inputfile =", "== 'Good day \"All\"' assert tmx.translate(r'Use \\\".') == r'Gebruik \\\".'", "xmltext.index('srclang') def test_sourcelanguage(self): minipo = 'msgid \"String\"\\nmsgstr \"String\"\\n' tmx =", "self.po2tmx(minipo) print \"The generated xml:\" print str(tmx) assert tmx.translate(\"Applications\") ==", "translate.convert import test_convert from translate.misc import wStringIO from translate.storage import", "xmltext = str(tmx) assert xmltext.index('creationtool=\"Translate Toolkit - po2tmx\"') assert xmltext.index('adminlang')", "assert xmltext.index('srclang') def test_sourcelanguage(self): minipo = 'msgid \"String\"\\nmsgstr \"String\"\\n' tmx", "\" \"en ekstra\"''' tmx = self.po2tmx(minipo) print \"The generated xml:\"", "the source, we want the target tuv assert tuv.get(\"{%s}lang\" %", "Foundation <<EMAIL>>\\n\" \"Language-Team: Afrikaans <<EMAIL>>\\n\" \"MIME-Version: 1.0\\n\" \"Content-Type: text/plain; charset=UTF-8\\n\"", "requiring files\"\"\" inputfile = wStringIO.StringIO(posource) outputfile = wStringIO.StringIO() outputfile.tmxfile =", "r'''msgid \"First column\\tSecond column\" msgstr \"Eerste kolom\\tTweede kolom\" ''' tmx", "on files\"\"\" convertmodule = 
po2tmx def test_help(self): \"\"\"tests getting help\"\"\"", "-*- coding: utf-8 -*- from translate.convert import po2tmx from translate.convert", "assert len(tmx.units) == 0 def test_nonascii(self): \"\"\"Tests that non-ascii conversion", "- po2tmx\"') assert xmltext.index('adminlang') assert xmltext.index('creationtoolversion') assert xmltext.index('datatype') assert xmltext.index('o-tmf')", "without requiring files\"\"\" inputfile = wStringIO.StringIO(posource) outputfile = wStringIO.StringIO() outputfile.tmxfile", "to tmx source without requiring files\"\"\" inputfile = wStringIO.StringIO(posource) outputfile", "== \"Eerste lyn\\nTweede lyn\" def test_escapedtabs(self): \"\"\"Test the escaping of", "r'Gebruik \\\".' def test_exclusions(self): \"\"\"Test that empty and fuzzy messages", "print \"The generated xml:\" print str(tmx) assert tmx.translate('Hello \"Everyone\"') ==", "the escaping of tabs\"\"\" minipo = r'''msgid \"First column\\tSecond column\"", "translate.storage import lisa class TestPO2TMX: def po2tmx(self, posource, sourcelanguage='en', targetlanguage='af'):", "extra') == 'Eerste deel en ekstra' def test_escapednewlines(self): \"\"\"Test the", "\"The generated xml:\" print str(tmx) assert tmx.translate(\"Applications\") == \"Toepassings\" assert", "excluded\"\"\" minipo = r'''#, fuzzy msgid \"One\" msgstr \"Een\" msgid", "options = self.help_check(options, \"-l LANG, --language=LANG\") options = self.help_check(options, \"--source-language=LANG\",", "# msgid \"\" msgstr \"\" \"Project-Id-Version: program 2.1-branch\\n\" \"Report-Msgid-Bugs-To: \\n\"", "line\\nSecond line\" msgstr \"Eerste lyn\\nTweede lyn\" ''' tmx = self.po2tmx(minipo)", "\"en ekstra\"''' tmx = self.po2tmx(minipo) print \"The generated xml:\" print", "\"The generated xml:\" print str(tmx) assert tmx.translate('Hello \"Everyone\"') == 'Good", "targetlanguage='af'): \"\"\"helper that converts po source to tmx source without", "print \"The generated xml:\" print str(tmx) header = tmx.document.find(\"header\") assert", "print \"The generated xml:\" print str(tmx) assert tmx.translate(\"First line\\nSecond line\")", "msgstr \"Bézier-kurwe\" ''' tmx = self.po2tmx(minipo) print str(tmx) assert tmx.translate(u\"Bézier", "xml:\" print str(tmx) assert tmx.translate(\"Applications\") == \"Toepassings\" assert tmx.translate(\"bla\") is", "is None xmltext = str(tmx) assert xmltext.index('creationtool=\"Translate Toolkit - po2tmx\"')", "lisa.XML_NS) == \"xh\" def test_multiline(self): \"\"\"Test multiline po entry\"\"\" minipo", "self.po2tmx(minipo) print \"The generated xml:\" print str(tmx) assert \"<tu\" not", "test_help(self): \"\"\"tests getting help\"\"\" options = test_convert.TestConvertCommand.test_help(self) options = self.help_check(options,", "\"String\"\\nmsgstr \"String\"\\n' tmx = self.po2tmx(minipo, targetlanguage=\"xh\") print \"The generated xml:\"", "xml:\" print str(tmx) header = tmx.document.find(\"header\") assert header.get(\"srclang\") == \"xh\"", "assert xmltext.index('segtype') assert xmltext.index('srclang') def test_sourcelanguage(self): minipo = 'msgid \"String\"\\nmsgstr", "python # -*- coding: utf-8 -*- from translate.convert import po2tmx", "\"First part \" \"and extra\" msgstr \"Eerste deel \" \"en", "print str(tmx) assert tmx.translate(u\"Bézier curve\") == u\"Bézier-kurwe\" class TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX):", "translate.convert import po2tmx from translate.convert import test_convert from translate.misc import", "#: ../dir/file.xml.in.h:1 
../dir/file2.xml.in.h:4 msgid \"Applications\" msgstr \"Toepassings\" \"\"\" tmx =", "sourcelanguage='en', targetlanguage='af'): \"\"\"helper that converts po source to tmx source", "str(tmx) assert len(tmx.units) == 0 def test_nonascii(self): \"\"\"Tests that non-ascii", "wStringIO from translate.storage import tmx from translate.storage import lisa class", "targetlanguage=\"xh\") print \"The generated xml:\" print str(tmx) tuv = tmx.document.findall(\".//%s\"", "translation of program ABC # msgid \"\" msgstr \"\" \"Project-Id-Version:", "\"Eerste kolom\\tTweede kolom\" def test_escapedquotes(self): \"\"\"Test the escaping of quotes", "msgstr \"Gebruik \\\\\\\".\" ''' tmx = self.po2tmx(minipo) print \"The generated", "\"Toepassings\" \"\"\" tmx = self.po2tmx(minipo) print \"The generated xml:\" print", "% lisa.XML_NS) == \"xh\" def test_multiline(self): \"\"\"Test multiline po entry\"\"\"", "program 2.1-branch\\n\" \"Report-Msgid-Bugs-To: \\n\" \"POT-Creation-Date: 2006-01-09 07:15+0100\\n\" \"PO-Revision-Date: 2004-03-30 17:02+0200\\n\"", "source without requiring files\"\"\" inputfile = wStringIO.StringIO(posource) outputfile = wStringIO.StringIO()", "self.po2tmx(minipo, sourcelanguage=\"xh\") print \"The generated xml:\" print str(tmx) header =", "something #: ../dir/file.xml.in.h:1 ../dir/file2.xml.in.h:4 msgid \"Applications\" msgstr \"Toepassings\" \"\"\" tmx", "\"\"\"Test the escaping of quotes (and slash)\"\"\" minipo = r'''msgid", "works.\"\"\" minipo = r'''msgid \"Bézier curve\" msgstr \"Bézier-kurwe\" ''' tmx", "Afrikaans translation of program ABC # msgid \"\" msgstr \"\"", "# -*- coding: utf-8 -*- from translate.convert import po2tmx from", "\"Toepassings\" assert tmx.translate(\"bla\") is None xmltext = str(tmx) assert xmltext.index('creationtool=\"Translate", "r'''msgid \"Hello \\\"Everyone\\\"\" msgstr \"Good day \\\"All\\\"\" msgid \"Use \\\\\\\".\"", "en ekstra' def test_escapednewlines(self): \"\"\"Test the escaping of newlines\"\"\" minipo", "= tmx.document.find(\"header\") assert header.get(\"srclang\") == \"xh\" def test_targetlanguage(self): minipo =", "from translate.misc import wStringIO from translate.storage import tmx from translate.storage", "minipo = r'''msgid \"First part \" \"and extra\" msgstr \"Eerste", "def test_basic(self): minipo = r\"\"\"# Afrikaans translation of program ABC", "sourcelanguage=sourcelanguage) po2tmx.convertpo(inputfile, outputfile, templatefile=None, sourcelanguage=sourcelanguage, targetlanguage=targetlanguage) return outputfile.tmxfile def test_basic(self):", "column\" msgstr \"Eerste kolom\\tTweede kolom\" ''' tmx = self.po2tmx(minipo) print", "\"Drie\" ''' tmx = self.po2tmx(minipo) print \"The generated xml:\" print", "tmx = self.po2tmx(minipo, sourcelanguage=\"xh\") print \"The generated xml:\" print str(tmx)", "= self.help_check(options, \"-l LANG, --language=LANG\") options = self.help_check(options, \"--source-language=LANG\", last=True)", "fuzzy msgid \"One\" msgstr \"Een\" msgid \"Two\" msgstr \"\" msgid", "minipo = r'''#, fuzzy msgid \"One\" msgstr \"Een\" msgid \"Two\"", "lyn\" def test_escapedtabs(self): \"\"\"Test the escaping of tabs\"\"\" minipo =", "print str(tmx) assert tmx.translate(\"First column\\tSecond column\") == \"Eerste kolom\\tTweede kolom\"", "tmx.translate('Hello \"Everyone\"') == 'Good day \"All\"' assert tmx.translate(r'Use \\\".') ==", "assert tmx.translate(r'Use \\\".') == r'Gebruik \\\".' 
def test_exclusions(self): \"\"\"Test that", "to do something #: ../dir/file.xml.in.h:1 ../dir/file2.xml.in.h:4 msgid \"Applications\" msgstr \"Toepassings\"", "\"Project-Id-Version: program 2.1-branch\\n\" \"Report-Msgid-Bugs-To: \\n\" \"POT-Creation-Date: 2006-01-09 07:15+0100\\n\" \"PO-Revision-Date: 2004-03-30", "def po2tmx(self, posource, sourcelanguage='en', targetlanguage='af'): \"\"\"helper that converts po source", "1.0\\n\" \"Content-Type: text/plain; charset=UTF-8\\n\" \"Content-Transfer-Encoding: 8bit\\n\" # Please remember to", "inputfile = wStringIO.StringIO(posource) outputfile = wStringIO.StringIO() outputfile.tmxfile = tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage)", "converts po source to tmx source without requiring files\"\"\" inputfile", "\"The generated xml:\" print str(tmx) assert \"<tu\" not in str(tmx)", "r'''msgid \"First line\\nSecond line\" msgstr \"Eerste lyn\\nTweede lyn\" ''' tmx", "msgstr \"Good day \\\"All\\\"\" msgid \"Use \\\\\\\".\" msgstr \"Gebruik \\\\\\\".\"", "msgid \"\" msgstr \"Drie\" ''' tmx = self.po2tmx(minipo) print \"The", "= wStringIO.StringIO(posource) outputfile = wStringIO.StringIO() outputfile.tmxfile = tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage) po2tmx.convertpo(inputfile,", "str(tmx) assert xmltext.index('creationtool=\"Translate Toolkit - po2tmx\"') assert xmltext.index('adminlang') assert xmltext.index('creationtoolversion')", "po2tmx(self, posource, sourcelanguage='en', targetlanguage='af'): \"\"\"helper that converts po source to", "self.po2tmx(minipo) print \"The generated xml:\" print str(tmx) assert tmx.translate('Hello \"Everyone\"')", "msgid \"\" msgstr \"\" \"Project-Id-Version: program 2.1-branch\\n\" \"Report-Msgid-Bugs-To: \\n\" \"POT-Creation-Date:", "charset=UTF-8\\n\" \"Content-Transfer-Encoding: 8bit\\n\" # Please remember to do something #:", "part \" \"and extra\" msgstr \"Eerste deel \" \"en ekstra\"'''", "r\"\"\"# Afrikaans translation of program ABC # msgid \"\" msgstr", "test_targetlanguage(self): minipo = 'msgid \"String\"\\nmsgstr \"String\"\\n' tmx = self.po2tmx(minipo, targetlanguage=\"xh\")", "-*- from translate.convert import po2tmx from translate.convert import test_convert from", "sourcelanguage=\"xh\") print \"The generated xml:\" print str(tmx) header = tmx.document.find(\"header\")", "files\"\"\" inputfile = wStringIO.StringIO(posource) outputfile = wStringIO.StringIO() outputfile.tmxfile = tmx.tmxfile(inputfile=None,", "outputfile.tmxfile = tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage) po2tmx.convertpo(inputfile, outputfile, templatefile=None, sourcelanguage=sourcelanguage, targetlanguage=targetlanguage) return", "= self.po2tmx(minipo, sourcelanguage=\"xh\") print \"The generated xml:\" print str(tmx) header", "\"Last-Translator: Zuza Software Foundation <<EMAIL>>\\n\" \"Language-Team: Afrikaans <<EMAIL>>\\n\" \"MIME-Version: 1.0\\n\"", "\"Good day \\\"All\\\"\" msgid \"Use \\\\\\\".\" msgstr \"Gebruik \\\\\\\".\" '''", "\"Report-Msgid-Bugs-To: \\n\" \"POT-Creation-Date: 2006-01-09 07:15+0100\\n\" \"PO-Revision-Date: 2004-03-30 17:02+0200\\n\" \"Last-Translator: Zuza", "None xmltext = str(tmx) assert xmltext.index('creationtool=\"Translate Toolkit - po2tmx\"') assert", "will be the source, we want the target tuv assert", "\\\\\\\".\" ''' tmx = self.po2tmx(minipo) print \"The generated xml:\" print", "def test_multiline(self): \"\"\"Test multiline po entry\"\"\" minipo = r'''msgid \"First", "str(tmx) assert tmx.translate('Hello \"Everyone\"') == 'Good day \"All\"' 
assert tmx.translate(r'Use", "\"xh\" def test_multiline(self): \"\"\"Test multiline po entry\"\"\" minipo = r'''msgid", "msgid \"Applications\" msgstr \"Toepassings\" \"\"\" tmx = self.po2tmx(minipo) print \"The", "source to tmx source without requiring files\"\"\" inputfile = wStringIO.StringIO(posource)", "''' tmx = self.po2tmx(minipo) print \"The generated xml:\" print str(tmx)" ]
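For context, the in-memory conversion exercised by the helper above can also be written as a standalone snippet. This is a minimal sketch that reuses only the calls already present in the tests; the one-entry PO content is a made-up example.

from translate.convert import po2tmx
from translate.misc import wStringIO
from translate.storage import tmx

posource = 'msgid "Hello"\nmsgstr "Hallo"\n'   # hypothetical PO content
inputfile = wStringIO.StringIO(posource)
outputfile = wStringIO.StringIO()
outputfile.tmxfile = tmx.tmxfile(inputfile=None, sourcelanguage="en")
po2tmx.convertpo(inputfile, outputfile, templatefile=None,
                 sourcelanguage="en", targetlanguage="af")
translated = outputfile.tmxfile.translate("Hello")   # expected: "Hallo"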
[ "sys.argv[1] == 'resume_all': jsonrq.resume_all() elif sys.argv[1] == 'show_info': show_torrent_info(sys.argv[2]) elif", "def show_torrent_info(info_hash): \"\"\" Display current torrent info :param info_hash: :return:", "torr_info['dl_speed'], torr_info['ul_speed'] ), _('total DL: {0}MB; total UL: {1}MB').format( torr_info['total_download'],", "# coding: utf-8 # Module: commands # Created on: 28.07.2015", "info_dialog.iscanceled(): info_dialog.update(torr_info['progress'], _('state: {0}; seeds: {1}; peers: {2}').format( torr_info['state'], torr_info['num_seeds'],", "elif sys.argv[1] == 'pause_all': jsonrq.pause_all() elif sys.argv[1] == 'resume_all': jsonrq.resume_all()", "{1}; peers: {2}').format( torr_info['state'], torr_info['num_seeds'], torr_info['num_peers'] ), _('size: {0}MB; DL", "= xbmcgui.DialogProgress() info_dialog.create(torr_info['name']) while not info_dialog.iscanceled(): info_dialog.update(torr_info['progress'], _('state: {0}; seeds:", "= addon.initialize_gettext() def show_torrent_info(info_hash): \"\"\" Display current torrent info :param", "== 'resume_all': jsonrq.resume_all() elif sys.argv[1] == 'show_info': show_torrent_info(sys.argv[2]) elif sys.argv[1]", "# Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html \"\"\" Context menu commands \"\"\"", "= Addon('plugin.video.yatp') _ = addon.initialize_gettext() def show_torrent_info(info_hash): \"\"\" Display current", "info :param info_hash: :return: \"\"\" torr_info = jsonrq.get_torrent_info(info_hash) info_dialog =", "sys import xbmc import xbmcgui import json_requests as jsonrq from", "seeds: {1}; peers: {2}').format( torr_info['state'], torr_info['num_seeds'], torr_info['num_peers'] ), _('size: {0}MB;", "sys.argv[1] == 'delete' and xbmcgui.Dialog().yesno( _('Confirm delete'), _('Do you really", "show_torrent_info(sys.argv[2]) elif sys.argv[1] == 'restore_finished': jsonrq.restore_finished(sys.argv[2]) else: addon.log_debug('Command cancelled or", "torr_info = jsonrq.get_torrent_info(info_hash) if __name__ == '__main__': if sys.argv[1] ==", "commands \"\"\" import sys import xbmc import xbmcgui import json_requests", "{1}MB').format( torr_info['total_download'], torr_info['total_upload']) ) xbmc.sleep(1000) torr_info = jsonrq.get_torrent_info(info_hash) if __name__", "'restore_finished': jsonrq.restore_finished(sys.argv[2]) else: addon.log_debug('Command cancelled or invalid command: {0}'.format(sys.argv[1])) xbmc.executebuiltin('Container.Refresh')", "28.07.2015 # Author: <NAME> aka <NAME>. 
(<EMAIL>) # Licence: GPL", "torrent info :param info_hash: :return: \"\"\" torr_info = jsonrq.get_torrent_info(info_hash) info_dialog", "True) elif sys.argv[1] == 'pause_all': jsonrq.pause_all() elif sys.argv[1] == 'resume_all':", "jsonrq.resume_all() elif sys.argv[1] == 'show_info': show_torrent_info(sys.argv[2]) elif sys.argv[1] == 'restore_finished':", "== 'delete' and xbmcgui.Dialog().yesno( _('Confirm delete'), _('Do you really want", "== 'show_info': show_torrent_info(sys.argv[2]) elif sys.argv[1] == 'restore_finished': jsonrq.restore_finished(sys.argv[2]) else: addon.log_debug('Command", "'__main__': if sys.argv[1] == 'pause': jsonrq.pause_torrent(sys.argv[2]) elif sys.argv[1] == 'resume':", "{0}MB; DL speed: {1}KB/s; UL speed: {2}KB/s').format( torr_info['size'], torr_info['dl_speed'], torr_info['ul_speed']", "xbmc.sleep(1000) torr_info = jsonrq.get_torrent_info(info_hash) if __name__ == '__main__': if sys.argv[1]", "Display current torrent info :param info_hash: :return: \"\"\" torr_info =", "jsonrq.get_torrent_info(info_hash) info_dialog = xbmcgui.DialogProgress() info_dialog.create(torr_info['name']) while not info_dialog.iscanceled(): info_dialog.update(torr_info['progress'], _('state:", "commands # Created on: 28.07.2015 # Author: <NAME> aka <NAME>.", "files?'), _('Warning: The files will be deleted permanently!')): jsonrq.remove_torrent(sys.argv[2], True)", "_ = addon.initialize_gettext() def show_torrent_info(info_hash): \"\"\" Display current torrent info", "), _('size: {0}MB; DL speed: {1}KB/s; UL speed: {2}KB/s').format( torr_info['size'],", "v.3: http://www.gnu.org/copyleft/gpl.html \"\"\" Context menu commands \"\"\" import sys import", "you really want to delete the torrent with files?'), _('Warning:", "with files?'), _('Warning: The files will be deleted permanently!')): jsonrq.remove_torrent(sys.argv[2],", "_('size: {0}MB; DL speed: {1}KB/s; UL speed: {2}KB/s').format( torr_info['size'], torr_info['dl_speed'],", "xbmcgui.DialogProgress() info_dialog.create(torr_info['name']) while not info_dialog.iscanceled(): info_dialog.update(torr_info['progress'], _('state: {0}; seeds: {1};", "jsonrq.pause_all() elif sys.argv[1] == 'resume_all': jsonrq.resume_all() elif sys.argv[1] == 'show_info':", "= jsonrq.get_torrent_info(info_hash) info_dialog = xbmcgui.DialogProgress() info_dialog.create(torr_info['name']) while not info_dialog.iscanceled(): info_dialog.update(torr_info['progress'],", "jsonrq.remove_torrent(sys.argv[2], True) elif sys.argv[1] == 'pause_all': jsonrq.pause_all() elif sys.argv[1] ==", "_('Warning: The files will be deleted permanently!')): jsonrq.remove_torrent(sys.argv[2], True) elif", "utf-8 # Module: commands # Created on: 28.07.2015 # Author:", "xbmcgui.Dialog().yesno( _('Confirm delete'), _('Do you really want to delete the", "'resume_all': jsonrq.resume_all() elif sys.argv[1] == 'show_info': show_torrent_info(sys.argv[2]) elif sys.argv[1] ==", "== 'resume': jsonrq.resume_torrent(sys.argv[2]) elif sys.argv[1] == 'delete' and xbmcgui.Dialog().yesno( _('Confirm", "# Author: <NAME> aka <NAME>. (<EMAIL>) # Licence: GPL v.3:", "'pause': jsonrq.pause_torrent(sys.argv[2]) elif sys.argv[1] == 'resume': jsonrq.resume_torrent(sys.argv[2]) elif sys.argv[1] ==", "elif sys.argv[1] == 'restore_finished': jsonrq.restore_finished(sys.argv[2]) else: addon.log_debug('Command cancelled or invalid", "files will be deleted permanently!')): jsonrq.remove_torrent(sys.argv[2], True) elif sys.argv[1] ==", "<NAME> aka <NAME>. 
(<EMAIL>) # Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html \"\"\"", "<NAME>. (<EMAIL>) # Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html \"\"\" Context menu", "torr_info = jsonrq.get_torrent_info(info_hash) info_dialog = xbmcgui.DialogProgress() info_dialog.create(torr_info['name']) while not info_dialog.iscanceled():", "total UL: {1}MB').format( torr_info['total_download'], torr_info['total_upload']) ) xbmc.sleep(1000) torr_info = jsonrq.get_torrent_info(info_hash)", "you really want to delete the torrent?')): jsonrq.remove_torrent(sys.argv[2], False) elif", "Author: <NAME> aka <NAME>. (<EMAIL>) # Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html", "the torrent with files?'), _('Warning: The files will be deleted", "== 'pause_all': jsonrq.pause_all() elif sys.argv[1] == 'resume_all': jsonrq.resume_all() elif sys.argv[1]", "== 'restore_finished': jsonrq.restore_finished(sys.argv[2]) else: addon.log_debug('Command cancelled or invalid command: {0}'.format(sys.argv[1]))", "{1}KB/s; UL speed: {2}KB/s').format( torr_info['size'], torr_info['dl_speed'], torr_info['ul_speed'] ), _('total DL:", "delete the torrent with files?'), _('Warning: The files will be", "info_dialog = xbmcgui.DialogProgress() info_dialog.create(torr_info['name']) while not info_dialog.iscanceled(): info_dialog.update(torr_info['progress'], _('state: {0};", ":param info_hash: :return: \"\"\" torr_info = jsonrq.get_torrent_info(info_hash) info_dialog = xbmcgui.DialogProgress()", "if sys.argv[1] == 'pause': jsonrq.pause_torrent(sys.argv[2]) elif sys.argv[1] == 'resume': jsonrq.resume_torrent(sys.argv[2])", "UL speed: {2}KB/s').format( torr_info['size'], torr_info['dl_speed'], torr_info['ul_speed'] ), _('total DL: {0}MB;", "\"\"\" Context menu commands \"\"\" import sys import xbmc import", "http://www.gnu.org/copyleft/gpl.html \"\"\" Context menu commands \"\"\" import sys import xbmc", "Addon addon = Addon('plugin.video.yatp') _ = addon.initialize_gettext() def show_torrent_info(info_hash): \"\"\"", "if __name__ == '__main__': if sys.argv[1] == 'pause': jsonrq.pause_torrent(sys.argv[2]) elif", "Addon('plugin.video.yatp') _ = addon.initialize_gettext() def show_torrent_info(info_hash): \"\"\" Display current torrent", "coding: utf-8 # Module: commands # Created on: 28.07.2015 #", "\"\"\" import sys import xbmc import xbmcgui import json_requests as", "speed: {2}KB/s').format( torr_info['size'], torr_info['dl_speed'], torr_info['ul_speed'] ), _('total DL: {0}MB; total", "really want to delete the torrent?')): jsonrq.remove_torrent(sys.argv[2], False) elif sys.argv[1]", "torr_info['num_seeds'], torr_info['num_peers'] ), _('size: {0}MB; DL speed: {1}KB/s; UL speed:", "jsonrq.get_torrent_info(info_hash) if __name__ == '__main__': if sys.argv[1] == 'pause': jsonrq.pause_torrent(sys.argv[2])", "xbmcgui import json_requests as jsonrq from simpleplugin import Addon addon", "== '__main__': if sys.argv[1] == 'pause': jsonrq.pause_torrent(sys.argv[2]) elif sys.argv[1] ==", "{2}KB/s').format( torr_info['size'], torr_info['dl_speed'], torr_info['ul_speed'] ), _('total DL: {0}MB; total UL:", "be deleted permanently!')): jsonrq.remove_torrent(sys.argv[2], True) elif sys.argv[1] == 'pause_all': jsonrq.pause_all()", "info_dialog.update(torr_info['progress'], _('state: {0}; seeds: {1}; peers: {2}').format( torr_info['state'], torr_info['num_seeds'], torr_info['num_peers']", "want to delete the torrent?')): jsonrq.remove_torrent(sys.argv[2], False) elif sys.argv[1] ==", "{2}').format( torr_info['state'], 
torr_info['num_seeds'], torr_info['num_peers'] ), _('size: {0}MB; DL speed: {1}KB/s;", "torr_info['num_peers'] ), _('size: {0}MB; DL speed: {1}KB/s; UL speed: {2}KB/s').format(", ") xbmc.sleep(1000) torr_info = jsonrq.get_torrent_info(info_hash) if __name__ == '__main__': if", "jsonrq.remove_torrent(sys.argv[2], False) elif sys.argv[1] == 'delete_with_files'and xbmcgui.Dialog().yesno( _('Confirm delete'), _('Do", "from simpleplugin import Addon addon = Addon('plugin.video.yatp') _ = addon.initialize_gettext()", "torr_info['state'], torr_info['num_seeds'], torr_info['num_peers'] ), _('size: {0}MB; DL speed: {1}KB/s; UL", "xbmc import xbmcgui import json_requests as jsonrq from simpleplugin import", "import xbmc import xbmcgui import json_requests as jsonrq from simpleplugin", "), _('total DL: {0}MB; total UL: {1}MB').format( torr_info['total_download'], torr_info['total_upload']) )", "simpleplugin import Addon addon = Addon('plugin.video.yatp') _ = addon.initialize_gettext() def", "torr_info['size'], torr_info['dl_speed'], torr_info['ul_speed'] ), _('total DL: {0}MB; total UL: {1}MB').format(", "speed: {1}KB/s; UL speed: {2}KB/s').format( torr_info['size'], torr_info['dl_speed'], torr_info['ul_speed'] ), _('total", "torr_info['total_download'], torr_info['total_upload']) ) xbmc.sleep(1000) torr_info = jsonrq.get_torrent_info(info_hash) if __name__ ==", "Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html \"\"\" Context menu commands \"\"\" import", "jsonrq.resume_torrent(sys.argv[2]) elif sys.argv[1] == 'delete' and xbmcgui.Dialog().yesno( _('Confirm delete'), _('Do", "'resume': jsonrq.resume_torrent(sys.argv[2]) elif sys.argv[1] == 'delete' and xbmcgui.Dialog().yesno( _('Confirm delete'),", "delete'), _('Do you really want to delete the torrent?')): jsonrq.remove_torrent(sys.argv[2],", "_('state: {0}; seeds: {1}; peers: {2}').format( torr_info['state'], torr_info['num_seeds'], torr_info['num_peers'] ),", "not info_dialog.iscanceled(): info_dialog.update(torr_info['progress'], _('state: {0}; seeds: {1}; peers: {2}').format( torr_info['state'],", "sys.argv[1] == 'pause_all': jsonrq.pause_all() elif sys.argv[1] == 'resume_all': jsonrq.resume_all() elif", "{0}MB; total UL: {1}MB').format( torr_info['total_download'], torr_info['total_upload']) ) xbmc.sleep(1000) torr_info =", "addon = Addon('plugin.video.yatp') _ = addon.initialize_gettext() def show_torrent_info(info_hash): \"\"\" Display", "peers: {2}').format( torr_info['state'], torr_info['num_seeds'], torr_info['num_peers'] ), _('size: {0}MB; DL speed:", "sys.argv[1] == 'restore_finished': jsonrq.restore_finished(sys.argv[2]) else: addon.log_debug('Command cancelled or invalid command:", "import sys import xbmc import xbmcgui import json_requests as jsonrq", "sys.argv[1] == 'resume': jsonrq.resume_torrent(sys.argv[2]) elif sys.argv[1] == 'delete' and xbmcgui.Dialog().yesno(", "elif sys.argv[1] == 'delete' and xbmcgui.Dialog().yesno( _('Confirm delete'), _('Do you", "jsonrq from simpleplugin import Addon addon = Addon('plugin.video.yatp') _ =", "_('Do you really want to delete the torrent with files?'),", "delete'), _('Do you really want to delete the torrent with", "DL speed: {1}KB/s; UL speed: {2}KB/s').format( torr_info['size'], torr_info['dl_speed'], torr_info['ul_speed'] ),", "want to delete the torrent with files?'), _('Warning: The files", "torr_info['ul_speed'] ), _('total DL: {0}MB; total UL: {1}MB').format( torr_info['total_download'], torr_info['total_upload'])", "elif sys.argv[1] == 'resume_all': jsonrq.resume_all() elif 
sys.argv[1] == 'show_info': show_torrent_info(sys.argv[2])", "torrent with files?'), _('Warning: The files will be deleted permanently!')):", "'delete' and xbmcgui.Dialog().yesno( _('Confirm delete'), _('Do you really want to", "== 'pause': jsonrq.pause_torrent(sys.argv[2]) elif sys.argv[1] == 'resume': jsonrq.resume_torrent(sys.argv[2]) elif sys.argv[1]", "sys.argv[1] == 'delete_with_files'and xbmcgui.Dialog().yesno( _('Confirm delete'), _('Do you really want", "Context menu commands \"\"\" import sys import xbmc import xbmcgui", "import Addon addon = Addon('plugin.video.yatp') _ = addon.initialize_gettext() def show_torrent_info(info_hash):", "= jsonrq.get_torrent_info(info_hash) if __name__ == '__main__': if sys.argv[1] == 'pause':", "import xbmcgui import json_requests as jsonrq from simpleplugin import Addon", "_('Confirm delete'), _('Do you really want to delete the torrent", "DL: {0}MB; total UL: {1}MB').format( torr_info['total_download'], torr_info['total_upload']) ) xbmc.sleep(1000) torr_info", "<filename>plugin.video.yatp/libs/client/commands.py<gh_stars>10-100 # coding: utf-8 # Module: commands # Created on:", "elif sys.argv[1] == 'resume': jsonrq.resume_torrent(sys.argv[2]) elif sys.argv[1] == 'delete' and", "and xbmcgui.Dialog().yesno( _('Confirm delete'), _('Do you really want to delete", "# Created on: 28.07.2015 # Author: <NAME> aka <NAME>. (<EMAIL>)", "menu commands \"\"\" import sys import xbmc import xbmcgui import", "addon.initialize_gettext() def show_torrent_info(info_hash): \"\"\" Display current torrent info :param info_hash:", "json_requests as jsonrq from simpleplugin import Addon addon = Addon('plugin.video.yatp')", "will be deleted permanently!')): jsonrq.remove_torrent(sys.argv[2], True) elif sys.argv[1] == 'pause_all':", ":return: \"\"\" torr_info = jsonrq.get_torrent_info(info_hash) info_dialog = xbmcgui.DialogProgress() info_dialog.create(torr_info['name']) while", "info_hash: :return: \"\"\" torr_info = jsonrq.get_torrent_info(info_hash) info_dialog = xbmcgui.DialogProgress() info_dialog.create(torr_info['name'])", "'pause_all': jsonrq.pause_all() elif sys.argv[1] == 'resume_all': jsonrq.resume_all() elif sys.argv[1] ==", "# Module: commands # Created on: 28.07.2015 # Author: <NAME>", "deleted permanently!')): jsonrq.remove_torrent(sys.argv[2], True) elif sys.argv[1] == 'pause_all': jsonrq.pause_all() elif", "elif sys.argv[1] == 'show_info': show_torrent_info(sys.argv[2]) elif sys.argv[1] == 'restore_finished': jsonrq.restore_finished(sys.argv[2])", "_('Confirm delete'), _('Do you really want to delete the torrent?')):", "really want to delete the torrent with files?'), _('Warning: The", "jsonrq.pause_torrent(sys.argv[2]) elif sys.argv[1] == 'resume': jsonrq.resume_torrent(sys.argv[2]) elif sys.argv[1] == 'delete'", "{0}; seeds: {1}; peers: {2}').format( torr_info['state'], torr_info['num_seeds'], torr_info['num_peers'] ), _('size:", "torr_info['total_upload']) ) xbmc.sleep(1000) torr_info = jsonrq.get_torrent_info(info_hash) if __name__ == '__main__':", "on: 28.07.2015 # Author: <NAME> aka <NAME>. 
(<EMAIL>) # Licence:", "The files will be deleted permanently!')): jsonrq.remove_torrent(sys.argv[2], True) elif sys.argv[1]", "import json_requests as jsonrq from simpleplugin import Addon addon =", "info_dialog.create(torr_info['name']) while not info_dialog.iscanceled(): info_dialog.update(torr_info['progress'], _('state: {0}; seeds: {1}; peers:", "show_torrent_info(info_hash): \"\"\" Display current torrent info :param info_hash: :return: \"\"\"", "delete the torrent?')): jsonrq.remove_torrent(sys.argv[2], False) elif sys.argv[1] == 'delete_with_files'and xbmcgui.Dialog().yesno(", "current torrent info :param info_hash: :return: \"\"\" torr_info = jsonrq.get_torrent_info(info_hash)", "sys.argv[1] == 'pause': jsonrq.pause_torrent(sys.argv[2]) elif sys.argv[1] == 'resume': jsonrq.resume_torrent(sys.argv[2]) elif", "sys.argv[1] == 'show_info': show_torrent_info(sys.argv[2]) elif sys.argv[1] == 'restore_finished': jsonrq.restore_finished(sys.argv[2]) else:", "_('Do you really want to delete the torrent?')): jsonrq.remove_torrent(sys.argv[2], False)", "torrent?')): jsonrq.remove_torrent(sys.argv[2], False) elif sys.argv[1] == 'delete_with_files'and xbmcgui.Dialog().yesno( _('Confirm delete'),", "while not info_dialog.iscanceled(): info_dialog.update(torr_info['progress'], _('state: {0}; seeds: {1}; peers: {2}').format(", "elif sys.argv[1] == 'delete_with_files'and xbmcgui.Dialog().yesno( _('Confirm delete'), _('Do you really", "== 'delete_with_files'and xbmcgui.Dialog().yesno( _('Confirm delete'), _('Do you really want to", "as jsonrq from simpleplugin import Addon addon = Addon('plugin.video.yatp') _", "to delete the torrent?')): jsonrq.remove_torrent(sys.argv[2], False) elif sys.argv[1] == 'delete_with_files'and", "False) elif sys.argv[1] == 'delete_with_files'and xbmcgui.Dialog().yesno( _('Confirm delete'), _('Do you", "'show_info': show_torrent_info(sys.argv[2]) elif sys.argv[1] == 'restore_finished': jsonrq.restore_finished(sys.argv[2]) else: addon.log_debug('Command cancelled", "_('total DL: {0}MB; total UL: {1}MB').format( torr_info['total_download'], torr_info['total_upload']) ) xbmc.sleep(1000)", "UL: {1}MB').format( torr_info['total_download'], torr_info['total_upload']) ) xbmc.sleep(1000) torr_info = jsonrq.get_torrent_info(info_hash) if", "to delete the torrent with files?'), _('Warning: The files will", "__name__ == '__main__': if sys.argv[1] == 'pause': jsonrq.pause_torrent(sys.argv[2]) elif sys.argv[1]", "'delete_with_files'and xbmcgui.Dialog().yesno( _('Confirm delete'), _('Do you really want to delete", "GPL v.3: http://www.gnu.org/copyleft/gpl.html \"\"\" Context menu commands \"\"\" import sys", "permanently!')): jsonrq.remove_torrent(sys.argv[2], True) elif sys.argv[1] == 'pause_all': jsonrq.pause_all() elif sys.argv[1]", "(<EMAIL>) # Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html \"\"\" Context menu commands", "\"\"\" torr_info = jsonrq.get_torrent_info(info_hash) info_dialog = xbmcgui.DialogProgress() info_dialog.create(torr_info['name']) while not", "the torrent?')): jsonrq.remove_torrent(sys.argv[2], False) elif sys.argv[1] == 'delete_with_files'and xbmcgui.Dialog().yesno( _('Confirm", "Created on: 28.07.2015 # Author: <NAME> aka <NAME>. (<EMAIL>) #", "\"\"\" Display current torrent info :param info_hash: :return: \"\"\" torr_info", "Module: commands # Created on: 28.07.2015 # Author: <NAME> aka", "aka <NAME>. (<EMAIL>) # Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html \"\"\" Context" ]
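For orientation, a hedged sketch of how a client directory listing might hook these commands into a Kodi context menu. The add-on path, list-item label and info-hash below are hypothetical, and the RunScript argument format is an assumption based on how commands.py reads sys.argv.

import xbmcgui

# Hypothetical values: path to this script inside the installed add-on and a
# torrent info-hash taken from the torrent list.
commands_py = 'special://home/addons/plugin.video.yatp/libs/client/commands.py'
info_hash = '0123456789abcdef0123456789abcdef01234567'

list_item = xbmcgui.ListItem('Some torrent')
list_item.addContextMenuItems([
    ('Pause', 'RunScript({0},pause,{1})'.format(commands_py, info_hash)),
    ('Torrent info', 'RunScript({0},show_info,{1})'.format(commands_py, info_hash)),
])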
[ "from distutils.core import setup setup(name='Mimik', version='1.0', description='Python framework for markov", "import setup setup(name='Mimik', version='1.0', description='Python framework for markov models', author='<NAME>',", "version='1.0', description='Python framework for markov models', author='<NAME>', author_email='<EMAIL>', url='https://www.python.org/sigs/distutils-sig/', packages=['distutils',", "python from distutils.core import setup setup(name='Mimik', version='1.0', description='Python framework for", "setup setup(name='Mimik', version='1.0', description='Python framework for markov models', author='<NAME>', author_email='<EMAIL>',", "description='Python framework for markov models', author='<NAME>', author_email='<EMAIL>', url='https://www.python.org/sigs/distutils-sig/', packages=['distutils', 'distutils.command'],", "setup(name='Mimik', version='1.0', description='Python framework for markov models', author='<NAME>', author_email='<EMAIL>', url='https://www.python.org/sigs/distutils-sig/',", "#!/usr/bin/env python from distutils.core import setup setup(name='Mimik', version='1.0', description='Python framework", "distutils.core import setup setup(name='Mimik', version='1.0', description='Python framework for markov models',", "framework for markov models', author='<NAME>', author_email='<EMAIL>', url='https://www.python.org/sigs/distutils-sig/', packages=['distutils', 'distutils.command'], )" ]
[ "Open Images dataset used for machine learning training. The image", "from their Flickr server source, verified for fixity, had EXIF", "size, and timestamp to the workflow dbase utctime = datetime.utcnow()", "images[-1] path, dirs, files = next(os.walk(batch_dir)) for file in files:", "their Flickr server source, verified for fixity, had EXIF metadata", "get the package size back under threshold images.pop(-1) except Exception", "open_images SET package_name = ? WHERE ImageID = ?\", (tarball_name,", "image files have been downloaded from their Flickr server source,", "batch source directory.\") except OSError as e: print(\"Unable to delete", "+= image[1] print(\"Total batch size: \" + get_human_readable_file_size(package_size)) if package_size", "for file in files: if file.find(last_image[0]) != -1: filepath =", "print(e) # record the tarball package name for each image", "image[0],),) cursor.execute(\"INSERT INTO packages (name, size, timestamp) VALUES (?,?,?)\", (tarball_name,", "= size / 1024.0 # apply the division return \"%.*f", "print(\"Created tarball \" + tarball_name + \".\") except Exception as", "size: \" + get_human_readable_file_size(package_size)) if package_size < package_threshold: print(\"Not enough", "= os.path.join( abs_path, \"deplatformr_open_images_workflow.sqlite\") workflow_db = sqlite3.connect(db_path) cursor = workflow_db.cursor()", "their annotation data, segmentation files and newly generated sha512 checksums.", "next power of 2 = 1GiB print(\"Package threshold: \" +", "Exception as e: print(\"Unable to create a package for batch", "in a sidecar metadata files using schema.org/ImageObject and JSON-LD format.',", "a subset of the Google Open Images dataset used for", "and are now bundled here with their annotation data, segmentation", "\"GiB\", \"TiB\"] suffixIndex = 0 while size > 1024 and", "0 while size > 1024 and suffixIndex < 4: suffixIndex", "= 838860800 # 800 Mib to the next power of", "new batch directory split = os.path.split(batch_dir) new_dir_number = int(split[1]) +", "return() else: try: # create new batch directory split =", "?\", (tarball_name, image[0],),) cursor.execute(\"INSERT INTO packages (name, size, timestamp) VALUES", "and newly generated sha512 checksums. This content and context is", "SET package_name = ? 
WHERE ImageID = ?\", (tarball_name, image[0],),)", "is described in a sidecar metadata files using schema.org/ImageObject and", "package size back under threshold images.pop(-1) except Exception as e:", "# increment the index of the suffix size = size", "and timestamp to the workflow dbase utctime = datetime.utcnow() tarball_size", "(name, size, timestamp) VALUES (?,?,?)\", (tarball_name, tarball_size, utctime,),) workflow_db.commit() workflow_db.close()", "images.pop(-1) except Exception as e: print(\"Unable to separate batch to", "increment the index of the suffix size = size /", "\"deplatformr-open-images-\" + split[1] bagit.make_bag(batch_dir, {'Source-Organization': 'Deplatformr Project', 'Organization-Address': 'https://open-images.deplatformr.com', 'External-Description':", "and context is described in a sidecar metadata files using", "\"source_data/deplatformr_open_images_v6.sqlite\") images_db = sqlite3.connect(db_path) cursor = images_db.cursor() for image in", "new_batch_dir = os.path.join(split[0], str(new_dir_number)) os.makedirs(new_batch_dir) # move all related files", "keep within threshold last_image = images[-1] path, dirs, files =", "if package_size < package_threshold: print(\"Not enough images yet to make", "except Exception as e: print(\"Unable to create a tarball package", "files: if file.find(last_image[0]) != -1: filepath = os.path.join(path, file) shutil.move(filepath,", "for image in images: cursor.execute(\"UPDATE open_images SET package_name = ?", "path, dirs, files = next(os.walk(batch_dir)) for file in files: if", "get_human_readable_file_size(package_size)) if package_size < package_threshold: print(\"Not enough images yet to", "cursor = workflow_db.cursor() for image in images: print(\"Linking image \"", "package_size += image[1] print(\"Total batch size: \" + get_human_readable_file_size(package_size)) if", "directory split = os.path.split(batch_dir) new_dir_number = int(split[1]) + 1 new_batch_dir", "int(split[1]) + 1 new_batch_dir = os.path.join(split[0], str(new_dir_number)) os.makedirs(new_batch_dir) # move", "= external_identifier + \".tar\" tarball = tarfile.open(os.path.join( packages_dir, tarball_name), \"w\")", "new_batch_dir, file)) # drop the last image from the list", "file in files: if file.find(last_image[0]) != -1: filepath = os.path.join(path,", "+ \" to \" + tarball_name + \" in SQLite.\")", "within threshold last_image = images[-1] path, dirs, files = next(os.walk(batch_dir))", "print(\"Linking image \" + image[0] + \" to \" +", "tarball_size = os.path.getsize( os.path.join(packages_dir, tarball_name)) print(\"Tarball size is: \" +", "'This package contains a subset of the Google Open Images", "image from the list (convert tuple) to get the package", "separate batch to make a package.\") print(e) return() # Convert", "Bagit directory.\") try: # Create the tar package packages_dir =", "\" + tarball_name + \" in SQLite.\") cursor.execute( \"UPDATE images", "= images_db.cursor() for image in images: cursor.execute(\"UPDATE open_images SET package_name", "0 for image in images: package_size += image[1] print(\"Total batch", "= sqlite3.connect(db_path) cursor = images_db.cursor() for image in images: cursor.execute(\"UPDATE", "precision=2): suffixes = [\"B\", \"KiB\", \"MiB\", \"GiB\", \"TiB\"] suffixIndex =", "datetime import datetime import bagit def create_package(images, batch_dir): package_threshold =", "record the tarball package name for each image db_path =", "external_identifier = \"deplatformr-open-images-\" + split[1] 
bagit.make_bag(batch_dir, {'Source-Organization': 'Deplatformr Project', 'Organization-Address':", "a sidecar metadata files using schema.org/ImageObject and JSON-LD format.', 'External-Identifier':", "print(\"Unable to create a tarball package from batch.\") print(e) return()", "timestamp) VALUES (?,?,?)\", (tarball_name, tarball_size, utctime,),) workflow_db.commit() workflow_db.close() except Exception", "# record the tarball package name for each image db_path", "image that's getting removed from batch to keep within threshold", "shutil import sqlite3 import tarfile from datetime import datetime import", "'External-Identifier': external_identifier, 'License': 'https://creativecommons.org/licenses/by/2.0/'}, checksums=[\"sha512\"]) print(\"Created a Bagit directory.\") try:", "WHERE image_id = ?\", (tarball_name, image[0],),) cursor.execute(\"INSERT INTO packages (name,", "(convert tuple) to get the package size back under threshold", "= ? WHERE ImageID = ?\", (tarball_name, image[0],),) images_db.commit() images_db.close()", "a Bagit directory external_identifier = \"deplatformr-open-images-\" + split[1] bagit.make_bag(batch_dir, {'Source-Organization':", "'External-Description': 'This package contains a subset of the Google Open", "dbase utctime = datetime.utcnow() tarball_size = os.path.getsize( os.path.join(packages_dir, tarball_name)) print(\"Tarball", "been downloaded from their Flickr server source, verified for fixity,", "size / 1024.0 # apply the division return \"%.*f %s\"", "= 0 for image in images: package_size += image[1] print(\"Total", "contains a subset of the Google Open Images dataset used", "= os.path.join(split[0], str(new_dir_number)) os.makedirs(new_batch_dir) # move all related files for", "+ split[1] bagit.make_bag(batch_dir, {'Source-Organization': 'Deplatformr Project', 'Organization-Address': 'https://open-images.deplatformr.com', 'External-Description': 'This", "abs_path, \"deplatformr_open_images_workflow.sqlite\") workflow_db = sqlite3.connect(db_path) cursor = workflow_db.cursor() for image", "= tarfile.open(os.path.join( packages_dir, tarball_name), \"w\") tarball.add(batch_dir, arcname=external_identifier) tarball.close() print(\"Created tarball", "# Convert batch directory into a Bagit directory external_identifier =", "the Google Open Images dataset used for machine learning training.", "machine learning training. The image files have been downloaded from", "package_threshold = 838860800 # 800 Mib to the next power", "from this batch.\") return() else: try: # create new batch", "image in images: cursor.execute(\"UPDATE open_images SET package_name = ? 
WHERE", "db_path = os.path.join( abs_path, \"deplatformr_open_images_workflow.sqlite\") workflow_db = sqlite3.connect(db_path) cursor =", "print(\"Not enough images yet to make a package from this", "related files for the last image that's getting removed from", "[\"B\", \"KiB\", \"MiB\", \"GiB\", \"TiB\"] suffixIndex = 0 while size", "create a package for batch directory \" + batch_dir) print(e)", "cursor = images_db.cursor() for image in images: cursor.execute(\"UPDATE open_images SET", "the suffix size = size / 1024.0 # apply the", "filepath = os.path.join(path, file) shutil.move(filepath, os.path.join( new_batch_dir, file)) # drop", "\" to \" + tarball_name + \" in SQLite.\") cursor.execute(", "= workflow_db.cursor() for image in images: print(\"Linking image \" +", "tar package packages_dir = os.path.join( os.getcwd(), \"source_data/packages/\") tarball_name = external_identifier", "datetime import bagit def create_package(images, batch_dir): package_threshold = 838860800 #", "print(\"Unable to delete the source directory.\") print(e) # record the", "package from this batch.\") return() else: try: # create new", "= \"deplatformr-open-images-\" + split[1] bagit.make_bag(batch_dir, {'Source-Organization': 'Deplatformr Project', 'Organization-Address': 'https://open-images.deplatformr.com',", "> 1024 and suffixIndex < 4: suffixIndex += 1 #", "OSError as e: print(\"Unable to delete the source directory.\") print(e)", "+ get_human_readable_file_size(package_size)) if package_size < package_threshold: print(\"Not enough images yet", "image \" + image[0] + \" to \" + tarball_name", "getting removed from batch to keep within threshold last_image =", "JSON-LD format.', 'External-Identifier': external_identifier, 'License': 'https://creativecommons.org/licenses/by/2.0/'}, checksums=[\"sha512\"]) print(\"Created a Bagit", "try: shutil.rmtree(batch_dir) print(\"Deleted the batch source directory.\") except OSError as", "dataset used for machine learning training. The image files have", "threshold: \" + get_human_readable_file_size(package_threshold)) abs_path = os.getcwd() try: package_size =", "index of the suffix size = size / 1024.0 #", "the list (convert tuple) to get the package size back", "directory \" + batch_dir) print(e) def get_human_readable_file_size(size, precision=2): suffixes =", "import bagit def create_package(images, batch_dir): package_threshold = 838860800 # 800", "1024.0 # apply the division return \"%.*f %s\" % (precision,", "packages_dir = os.path.join( os.getcwd(), \"source_data/packages/\") tarball_name = external_identifier + \".tar\"", "< 4: suffixIndex += 1 # increment the index of", "package_name = ? WHERE image_id = ?\", (tarball_name, image[0],),) cursor.execute(\"INSERT", "return() # Convert batch directory into a Bagit directory external_identifier", "batch_dir) print(e) def get_human_readable_file_size(size, precision=2): suffixes = [\"B\", \"KiB\", \"MiB\",", "workflow_db.cursor() for image in images: print(\"Linking image \" + image[0]", "workflow dbase utctime = datetime.utcnow() tarball_size = os.path.getsize( os.path.join(packages_dir, tarball_name))", "to make a package from this batch.\") return() else: try:", "VALUES (?,?,?)\", (tarball_name, tarball_size, utctime,),) workflow_db.commit() workflow_db.close() except Exception as", "abs_path = os.getcwd() try: package_size = 0 for image in", "files and newly generated sha512 checksums. 
This content and context", "tarball package name for each image db_path = os.path.join( abs_path,", "the last image from the list (convert tuple) to get", "each image db_path = os.path.join( abs_path, \"source_data/deplatformr_open_images_v6.sqlite\") images_db = sqlite3.connect(db_path)", "try: package_size = 0 for image in images: package_size +=", "Bagit directory external_identifier = \"deplatformr-open-images-\" + split[1] bagit.make_bag(batch_dir, {'Source-Organization': 'Deplatformr", "sha512 checksums. This content and context is described in a", "\"KiB\", \"MiB\", \"GiB\", \"TiB\"] suffixIndex = 0 while size >", "utctime = datetime.utcnow() tarball_size = os.path.getsize( os.path.join(packages_dir, tarball_name)) print(\"Tarball size", "context is described in a sidecar metadata files using schema.org/ImageObject", "segmentation files and newly generated sha512 checksums. This content and", "threshold last_image = images[-1] path, dirs, files = next(os.walk(batch_dir)) for", "INTO packages (name, size, timestamp) VALUES (?,?,?)\", (tarball_name, tarball_size, utctime,),)", "This content and context is described in a sidecar metadata", "SET package_name = ? WHERE image_id = ?\", (tarball_name, image[0],),)", "the batch source directory.\") except OSError as e: print(\"Unable to", "tarball = tarfile.open(os.path.join( packages_dir, tarball_name), \"w\") tarball.add(batch_dir, arcname=external_identifier) tarball.close() print(\"Created", "and suffixIndex < 4: suffixIndex += 1 # increment the", "annotation data, segmentation files and newly generated sha512 checksums. This", "!= -1: filepath = os.path.join(path, file) shutil.move(filepath, os.path.join( new_batch_dir, file))", "server source, verified for fixity, had EXIF metadata extracted, and", "source, verified for fixity, had EXIF metadata extracted, and are", "image[1] print(\"Total batch size: \" + get_human_readable_file_size(package_size)) if package_size <", "tarball_size, utctime,),) workflow_db.commit() workflow_db.close() except Exception as e: print(\"Unable to", "of the suffix size = size / 1024.0 # apply", "all related files for the last image that's getting removed", "batch.\") return() else: try: # create new batch directory split", "the source directory.\") print(e) # record the tarball package name", "to get the package size back under threshold images.pop(-1) except", "SQLite.\") cursor.execute( \"UPDATE images SET package_name = ? 
WHERE image_id", "os.makedirs(new_batch_dir) # move all related files for the last image", "to make a package.\") print(e) return() # Convert batch directory", "subset of the Google Open Images dataset used for machine", "size is: \" + get_human_readable_file_size(tarball_size)) db_path = os.path.join( abs_path, \"deplatformr_open_images_workflow.sqlite\")", "def create_package(images, batch_dir): package_threshold = 838860800 # 800 Mib to", "# apply the division return \"%.*f %s\" % (precision, size,", "directory.\") except OSError as e: print(\"Unable to delete the source", "'https://creativecommons.org/licenses/by/2.0/'}, checksums=[\"sha512\"]) print(\"Created a Bagit directory.\") try: # Create the", "as e: print(\"Unable to create a tarball package from batch.\")", "os.getcwd(), \"source_data/packages/\") tarball_name = external_identifier + \".tar\" tarball = tarfile.open(os.path.join(", "for the last image that's getting removed from batch to", "drop the last image from the list (convert tuple) to", "# add tarball name, size, and timestamp to the workflow", "for fixity, had EXIF metadata extracted, and are now bundled", "checksums=[\"sha512\"]) print(\"Created a Bagit directory.\") try: # Create the tar", "files using schema.org/ImageObject and JSON-LD format.', 'External-Identifier': external_identifier, 'License': 'https://creativecommons.org/licenses/by/2.0/'},", "\".tar\" tarball = tarfile.open(os.path.join( packages_dir, tarball_name), \"w\") tarball.add(batch_dir, arcname=external_identifier) tarball.close()", "\"MiB\", \"GiB\", \"TiB\"] suffixIndex = 0 while size > 1024", "Project', 'Organization-Address': 'https://open-images.deplatformr.com', 'External-Description': 'This package contains a subset of", "a package from this batch.\") return() else: try: # create", "+ \".tar\" tarball = tarfile.open(os.path.join( packages_dir, tarball_name), \"w\") tarball.add(batch_dir, arcname=external_identifier)", "get_human_readable_file_size(package_threshold)) abs_path = os.getcwd() try: package_size = 0 for image", "\" in SQLite.\") cursor.execute( \"UPDATE images SET package_name = ?", "tarball_name)) print(\"Tarball size is: \" + get_human_readable_file_size(tarball_size)) db_path = os.path.join(", "packages_dir, tarball_name), \"w\") tarball.add(batch_dir, arcname=external_identifier) tarball.close() print(\"Created tarball \" +", "= os.getcwd() try: package_size = 0 for image in images:", "shutil.rmtree(batch_dir) print(\"Deleted the batch source directory.\") except OSError as e:", "external_identifier + \".tar\" tarball = tarfile.open(os.path.join( packages_dir, tarball_name), \"w\") tarball.add(batch_dir,", "images_db.close() # add tarball name, size, and timestamp to the", "print(\"Tarball size is: \" + get_human_readable_file_size(tarball_size)) db_path = os.path.join( abs_path,", "downloaded from their Flickr server source, verified for fixity, had", "EXIF metadata extracted, and are now bundled here with their", "of the Google Open Images dataset used for machine learning", "Create the tar package packages_dir = os.path.join( os.getcwd(), \"source_data/packages/\") tarball_name", "image_id = ?\", (tarball_name, image[0],),) cursor.execute(\"INSERT INTO packages (name, size,", "package_size = 0 for image in images: package_size += image[1]", "import os import shutil import sqlite3 import tarfile from datetime", "training. 
The image files have been downloaded from their Flickr", "package for batch directory \" + batch_dir) print(e) def get_human_readable_file_size(size,", "suffixes = [\"B\", \"KiB\", \"MiB\", \"GiB\", \"TiB\"] suffixIndex = 0", "from batch to keep within threshold last_image = images[-1] path,", "os.path.join(packages_dir, tarball_name)) print(\"Tarball size is: \" + get_human_readable_file_size(tarball_size)) db_path =", "import shutil import sqlite3 import tarfile from datetime import datetime", "= os.path.join(path, file) shutil.move(filepath, os.path.join( new_batch_dir, file)) # drop the", "external_identifier, 'License': 'https://creativecommons.org/licenses/by/2.0/'}, checksums=[\"sha512\"]) print(\"Created a Bagit directory.\") try: #", "images: cursor.execute(\"UPDATE open_images SET package_name = ? WHERE ImageID =", "\" + image[0] + \" to \" + tarball_name +", "tarball package from batch.\") print(e) return() try: shutil.rmtree(batch_dir) print(\"Deleted the", "4: suffixIndex += 1 # increment the index of the", "to create a package for batch directory \" + batch_dir)", "in images: package_size += image[1] print(\"Total batch size: \" +", "the package size back under threshold images.pop(-1) except Exception as", "print(e) return() try: shutil.rmtree(batch_dir) print(\"Deleted the batch source directory.\") except", "package.\") print(e) return() # Convert batch directory into a Bagit", "(tarball_name, image[0],),) images_db.commit() images_db.close() # add tarball name, size, and", "and JSON-LD format.', 'External-Identifier': external_identifier, 'License': 'https://creativecommons.org/licenses/by/2.0/'}, checksums=[\"sha512\"]) print(\"Created a", "sidecar metadata files using schema.org/ImageObject and JSON-LD format.', 'External-Identifier': external_identifier,", "delete the source directory.\") print(e) # record the tarball package", "bagit.make_bag(batch_dir, {'Source-Organization': 'Deplatformr Project', 'Organization-Address': 'https://open-images.deplatformr.com', 'External-Description': 'This package contains", "directory into a Bagit directory external_identifier = \"deplatformr-open-images-\" + split[1]", "os.path.getsize( os.path.join(packages_dir, tarball_name)) print(\"Tarball size is: \" + get_human_readable_file_size(tarball_size)) db_path", "to the next power of 2 = 1GiB print(\"Package threshold:", "os.path.join( abs_path, \"source_data/deplatformr_open_images_v6.sqlite\") images_db = sqlite3.connect(db_path) cursor = images_db.cursor() for", "into a Bagit directory external_identifier = \"deplatformr-open-images-\" + split[1] bagit.make_bag(batch_dir,", "files = next(os.walk(batch_dir)) for file in files: if file.find(last_image[0]) !=", "batch size: \" + get_human_readable_file_size(package_size)) if package_size < package_threshold: print(\"Not", "as e: print(\"Unable to create a package for batch directory", "print(\"Unable to create a package for batch directory \" +", "tarball name, size, and timestamp to the workflow dbase utctime", "directory.\") try: # Create the tar package packages_dir = os.path.join(", "= os.path.join( abs_path, \"source_data/deplatformr_open_images_v6.sqlite\") images_db = sqlite3.connect(db_path) cursor = images_db.cursor()", "size > 1024 and suffixIndex < 4: suffixIndex += 1", "# create new batch directory split = os.path.split(batch_dir) new_dir_number =", "import datetime import bagit def create_package(images, batch_dir): package_threshold = 838860800", "size, timestamp) VALUES (?,?,?)\", (tarball_name, tarball_size, 
utctime,),) workflow_db.commit() workflow_db.close() except", "apply the division return \"%.*f %s\" % (precision, size, suffixes[suffixIndex])", "838860800 # 800 Mib to the next power of 2", "images_db.commit() images_db.close() # add tarball name, size, and timestamp to", "print(e) def get_human_readable_file_size(size, precision=2): suffixes = [\"B\", \"KiB\", \"MiB\", \"GiB\",", "= [\"B\", \"KiB\", \"MiB\", \"GiB\", \"TiB\"] suffixIndex = 0 while", "last_image = images[-1] path, dirs, files = next(os.walk(batch_dir)) for file", "\".\") except Exception as e: print(\"Unable to create a tarball", "cursor.execute(\"INSERT INTO packages (name, size, timestamp) VALUES (?,?,?)\", (tarball_name, tarball_size,", "make a package from this batch.\") return() else: try: #", "= os.path.join( os.getcwd(), \"source_data/packages/\") tarball_name = external_identifier + \".tar\" tarball", "create new batch directory split = os.path.split(batch_dir) new_dir_number = int(split[1])", "under threshold images.pop(-1) except Exception as e: print(\"Unable to separate", "return() try: shutil.rmtree(batch_dir) print(\"Deleted the batch source directory.\") except OSError", "print(e) return() # Convert batch directory into a Bagit directory", "def get_human_readable_file_size(size, precision=2): suffixes = [\"B\", \"KiB\", \"MiB\", \"GiB\", \"TiB\"]", "'Deplatformr Project', 'Organization-Address': 'https://open-images.deplatformr.com', 'External-Description': 'This package contains a subset", "checksums. This content and context is described in a sidecar", "tarball_name), \"w\") tarball.add(batch_dir, arcname=external_identifier) tarball.close() print(\"Created tarball \" + tarball_name", "os.path.join(path, file) shutil.move(filepath, os.path.join( new_batch_dir, file)) # drop the last", "file) shutil.move(filepath, os.path.join( new_batch_dir, file)) # drop the last image", "the tarball package name for each image db_path = os.path.join(", "1 new_batch_dir = os.path.join(split[0], str(new_dir_number)) os.makedirs(new_batch_dir) # move all related", "schema.org/ImageObject and JSON-LD format.', 'External-Identifier': external_identifier, 'License': 'https://creativecommons.org/licenses/by/2.0/'}, checksums=[\"sha512\"]) print(\"Created", "metadata extracted, and are now bundled here with their annotation", "in images: cursor.execute(\"UPDATE open_images SET package_name = ? WHERE ImageID", "= 0 while size > 1024 and suffixIndex < 4:", "from datetime import datetime import bagit def create_package(images, batch_dir): package_threshold", "'License': 'https://creativecommons.org/licenses/by/2.0/'}, checksums=[\"sha512\"]) print(\"Created a Bagit directory.\") try: # Create", "(?,?,?)\", (tarball_name, tarball_size, utctime,),) workflow_db.commit() workflow_db.close() except Exception as e:", "now bundled here with their annotation data, segmentation files and", "except Exception as e: print(\"Unable to create a package for", "e: print(\"Unable to delete the source directory.\") print(e) # record", "images yet to make a package from this batch.\") return()", "as e: print(\"Unable to delete the source directory.\") print(e) #", "+ 1 new_batch_dir = os.path.join(split[0], str(new_dir_number)) os.makedirs(new_batch_dir) # move all", "Exception as e: print(\"Unable to create a tarball package from", "learning training. The image files have been downloaded from their", "tarball \" + tarball_name + \".\") except Exception as e:", "Images dataset used for machine learning training. 
The image files", "bundled here with their annotation data, segmentation files and newly", "from the list (convert tuple) to get the package size", "package_size < package_threshold: print(\"Not enough images yet to make a", "data, segmentation files and newly generated sha512 checksums. This content", "1GiB print(\"Package threshold: \" + get_human_readable_file_size(package_threshold)) abs_path = os.getcwd() try:", "for each image db_path = os.path.join( abs_path, \"source_data/deplatformr_open_images_v6.sqlite\") images_db =", "workflow_db.commit() workflow_db.close() except Exception as e: print(\"Unable to create a", "package from batch.\") print(e) return() try: shutil.rmtree(batch_dir) print(\"Deleted the batch", "size back under threshold images.pop(-1) except Exception as e: print(\"Unable", "package contains a subset of the Google Open Images dataset", "file.find(last_image[0]) != -1: filepath = os.path.join(path, file) shutil.move(filepath, os.path.join( new_batch_dir,", "{'Source-Organization': 'Deplatformr Project', 'Organization-Address': 'https://open-images.deplatformr.com', 'External-Description': 'This package contains a", "to keep within threshold last_image = images[-1] path, dirs, files", "for machine learning training. The image files have been downloaded", "image[0],),) images_db.commit() images_db.close() # add tarball name, size, and timestamp", "'https://open-images.deplatformr.com', 'External-Description': 'This package contains a subset of the Google", "# Create the tar package packages_dir = os.path.join( os.getcwd(), \"source_data/packages/\")", "tarball.close() print(\"Created tarball \" + tarball_name + \".\") except Exception", "package name for each image db_path = os.path.join( abs_path, \"source_data/deplatformr_open_images_v6.sqlite\")", "2 = 1GiB print(\"Package threshold: \" + get_human_readable_file_size(package_threshold)) abs_path =", "workflow_db.close() except Exception as e: print(\"Unable to create a package", "here with their annotation data, segmentation files and newly generated", "e: print(\"Unable to create a package for batch directory \"", "tarball_name + \" in SQLite.\") cursor.execute( \"UPDATE images SET package_name", "Exception as e: print(\"Unable to separate batch to make a", "e: print(\"Unable to separate batch to make a package.\") print(e)", "print(\"Unable to separate batch to make a package.\") print(e) return()", "sqlite3 import tarfile from datetime import datetime import bagit def", "'Organization-Address': 'https://open-images.deplatformr.com', 'External-Description': 'This package contains a subset of the", "= next(os.walk(batch_dir)) for file in files: if file.find(last_image[0]) != -1:", "name, size, and timestamp to the workflow dbase utctime =", "tarball_name + \".\") except Exception as e: print(\"Unable to create", "images_db.cursor() for image in images: cursor.execute(\"UPDATE open_images SET package_name =", "a tarball package from batch.\") print(e) return() try: shutil.rmtree(batch_dir) print(\"Deleted", "bagit def create_package(images, batch_dir): package_threshold = 838860800 # 800 Mib", "create a tarball package from batch.\") print(e) return() try: shutil.rmtree(batch_dir)", "cursor.execute(\"UPDATE open_images SET package_name = ? 
WHERE ImageID = ?\",", "last image from the list (convert tuple) to get the", "the last image that's getting removed from batch to keep", "removed from batch to keep within threshold last_image = images[-1]", "back under threshold images.pop(-1) except Exception as e: print(\"Unable to", "power of 2 = 1GiB print(\"Package threshold: \" + get_human_readable_file_size(package_threshold))", "with their annotation data, segmentation files and newly generated sha512", "image in images: package_size += image[1] print(\"Total batch size: \"", "is: \" + get_human_readable_file_size(tarball_size)) db_path = os.path.join( abs_path, \"deplatformr_open_images_workflow.sqlite\") workflow_db", "Flickr server source, verified for fixity, had EXIF metadata extracted,", "directory.\") print(e) # record the tarball package name for each", "str(new_dir_number)) os.makedirs(new_batch_dir) # move all related files for the last", "+= 1 # increment the index of the suffix size", "next(os.walk(batch_dir)) for file in files: if file.find(last_image[0]) != -1: filepath", "used for machine learning training. The image files have been", "arcname=external_identifier) tarball.close() print(\"Created tarball \" + tarball_name + \".\") except", "the index of the suffix size = size / 1024.0", "as e: print(\"Unable to separate batch to make a package.\")", "the division return \"%.*f %s\" % (precision, size, suffixes[suffixIndex]) return()", "files have been downloaded from their Flickr server source, verified", "to separate batch to make a package.\") print(e) return() #", "\" + get_human_readable_file_size(package_size)) if package_size < package_threshold: print(\"Not enough images", "last image that's getting removed from batch to keep within", "+ \".\") except Exception as e: print(\"Unable to create a", "tuple) to get the package size back under threshold images.pop(-1)", "extracted, and are now bundled here with their annotation data,", "to create a tarball package from batch.\") print(e) return() try:", "in images: print(\"Linking image \" + image[0] + \" to", "create_package(images, batch_dir): package_threshold = 838860800 # 800 Mib to the", "sqlite3.connect(db_path) cursor = images_db.cursor() for image in images: cursor.execute(\"UPDATE open_images", "fixity, had EXIF metadata extracted, and are now bundled here", "add tarball name, size, and timestamp to the workflow dbase", "\"UPDATE images SET package_name = ? WHERE image_id = ?\",", "\" + tarball_name + \".\") except Exception as e: print(\"Unable", "= ? 
WHERE image_id = ?\", (tarball_name, image[0],),) cursor.execute(\"INSERT INTO", "get_human_readable_file_size(size, precision=2): suffixes = [\"B\", \"KiB\", \"MiB\", \"GiB\", \"TiB\"] suffixIndex", "using schema.org/ImageObject and JSON-LD format.', 'External-Identifier': external_identifier, 'License': 'https://creativecommons.org/licenses/by/2.0/'}, checksums=[\"sha512\"])", "try: # create new batch directory split = os.path.split(batch_dir) new_dir_number", "new_dir_number = int(split[1]) + 1 new_batch_dir = os.path.join(split[0], str(new_dir_number)) os.makedirs(new_batch_dir)", "timestamp to the workflow dbase utctime = datetime.utcnow() tarball_size =", "of 2 = 1GiB print(\"Package threshold: \" + get_human_readable_file_size(package_threshold)) abs_path", "image[0] + \" to \" + tarball_name + \" in", "source directory.\") except OSError as e: print(\"Unable to delete the", "shutil.move(filepath, os.path.join( new_batch_dir, file)) # drop the last image from", "try: # Create the tar package packages_dir = os.path.join( os.getcwd(),", "suffixIndex = 0 while size > 1024 and suffixIndex <", "suffix size = size / 1024.0 # apply the division", "the next power of 2 = 1GiB print(\"Package threshold: \"", "\" + batch_dir) print(e) def get_human_readable_file_size(size, precision=2): suffixes = [\"B\",", "? WHERE ImageID = ?\", (tarball_name, image[0],),) images_db.commit() images_db.close() #", "+ batch_dir) print(e) def get_human_readable_file_size(size, precision=2): suffixes = [\"B\", \"KiB\",", "have been downloaded from their Flickr server source, verified for", "batch to keep within threshold last_image = images[-1] path, dirs,", "\"source_data/packages/\") tarball_name = external_identifier + \".tar\" tarball = tarfile.open(os.path.join( packages_dir,", "enough images yet to make a package from this batch.\")", "+ tarball_name + \".\") except Exception as e: print(\"Unable to", "are now bundled here with their annotation data, segmentation files", "verified for fixity, had EXIF metadata extracted, and are now", "# drop the last image from the list (convert tuple)", "\"w\") tarball.add(batch_dir, arcname=external_identifier) tarball.close() print(\"Created tarball \" + tarball_name +", "make a package.\") print(e) return() # Convert batch directory into", "# 800 Mib to the next power of 2 =", "name for each image db_path = os.path.join( abs_path, \"source_data/deplatformr_open_images_v6.sqlite\") images_db", "generated sha512 checksums. 
This content and context is described in", "for batch directory \" + batch_dir) print(e) def get_human_readable_file_size(size, precision=2):", "to the workflow dbase utctime = datetime.utcnow() tarball_size = os.path.getsize(", "file)) # drop the last image from the list (convert", "package packages_dir = os.path.join( os.getcwd(), \"source_data/packages/\") tarball_name = external_identifier +", "print(\"Package threshold: \" + get_human_readable_file_size(package_threshold)) abs_path = os.getcwd() try: package_size", "directory external_identifier = \"deplatformr-open-images-\" + split[1] bagit.make_bag(batch_dir, {'Source-Organization': 'Deplatformr Project',", "os.path.join(split[0], str(new_dir_number)) os.makedirs(new_batch_dir) # move all related files for the", "batch directory into a Bagit directory external_identifier = \"deplatformr-open-images-\" +", "os import shutil import sqlite3 import tarfile from datetime import", "batch_dir): package_threshold = 838860800 # 800 Mib to the next", "\"TiB\"] suffixIndex = 0 while size > 1024 and suffixIndex", "batch to make a package.\") print(e) return() # Convert batch", "while size > 1024 and suffixIndex < 4: suffixIndex +=", "+ tarball_name + \" in SQLite.\") cursor.execute( \"UPDATE images SET", "utctime,),) workflow_db.commit() workflow_db.close() except Exception as e: print(\"Unable to create", "Mib to the next power of 2 = 1GiB print(\"Package", "images: package_size += image[1] print(\"Total batch size: \" + get_human_readable_file_size(package_size))", "= os.path.split(batch_dir) new_dir_number = int(split[1]) + 1 new_batch_dir = os.path.join(split[0],", "os.getcwd() try: package_size = 0 for image in images: package_size", "/ 1024.0 # apply the division return \"%.*f %s\" %", "batch.\") print(e) return() try: shutil.rmtree(batch_dir) print(\"Deleted the batch source directory.\")", "packages (name, size, timestamp) VALUES (?,?,?)\", (tarball_name, tarball_size, utctime,),) workflow_db.commit()", "image in images: print(\"Linking image \" + image[0] + \"", "datetime.utcnow() tarball_size = os.path.getsize( os.path.join(packages_dir, tarball_name)) print(\"Tarball size is: \"", "\" + get_human_readable_file_size(tarball_size)) db_path = os.path.join( abs_path, \"deplatformr_open_images_workflow.sqlite\") workflow_db =", "sqlite3.connect(db_path) cursor = workflow_db.cursor() for image in images: print(\"Linking image", "workflow_db = sqlite3.connect(db_path) cursor = workflow_db.cursor() for image in images:", "os.path.join( os.getcwd(), \"source_data/packages/\") tarball_name = external_identifier + \".tar\" tarball =", "in SQLite.\") cursor.execute( \"UPDATE images SET package_name = ? WHERE", "print(\"Total batch size: \" + get_human_readable_file_size(package_size)) if package_size < package_threshold:", "import tarfile from datetime import datetime import bagit def create_package(images,", "for image in images: print(\"Linking image \" + image[0] +", "Google Open Images dataset used for machine learning training. 
The", "e: print(\"Unable to create a tarball package from batch.\") print(e)", "batch directory \" + batch_dir) print(e) def get_human_readable_file_size(size, precision=2): suffixes", "content and context is described in a sidecar metadata files", "= 1GiB print(\"Package threshold: \" + get_human_readable_file_size(package_threshold)) abs_path = os.getcwd()", "?\", (tarball_name, image[0],),) images_db.commit() images_db.close() # add tarball name, size,", "= os.path.getsize( os.path.join(packages_dir, tarball_name)) print(\"Tarball size is: \" + get_human_readable_file_size(tarball_size))", "? WHERE image_id = ?\", (tarball_name, image[0],),) cursor.execute(\"INSERT INTO packages", "metadata files using schema.org/ImageObject and JSON-LD format.', 'External-Identifier': external_identifier, 'License':", "split[1] bagit.make_bag(batch_dir, {'Source-Organization': 'Deplatformr Project', 'Organization-Address': 'https://open-images.deplatformr.com', 'External-Description': 'This package", "\"deplatformr_open_images_workflow.sqlite\") workflow_db = sqlite3.connect(db_path) cursor = workflow_db.cursor() for image in", "from batch.\") print(e) return() try: shutil.rmtree(batch_dir) print(\"Deleted the batch source", "tarball.add(batch_dir, arcname=external_identifier) tarball.close() print(\"Created tarball \" + tarball_name + \".\")", "else: try: # create new batch directory split = os.path.split(batch_dir)", "had EXIF metadata extracted, and are now bundled here with", "to delete the source directory.\") print(e) # record the tarball", "The image files have been downloaded from their Flickr server", "\" + get_human_readable_file_size(package_threshold)) abs_path = os.getcwd() try: package_size = 0", "a Bagit directory.\") try: # Create the tar package packages_dir", "os.path.join( new_batch_dir, file)) # drop the last image from the", "suffixIndex += 1 # increment the index of the suffix", "images SET package_name = ? 
WHERE image_id = ?\", (tarball_name,", "+ get_human_readable_file_size(package_threshold)) abs_path = os.getcwd() try: package_size = 0 for", "= sqlite3.connect(db_path) cursor = workflow_db.cursor() for image in images: print(\"Linking", "described in a sidecar metadata files using schema.org/ImageObject and JSON-LD", "= int(split[1]) + 1 new_batch_dir = os.path.join(split[0], str(new_dir_number)) os.makedirs(new_batch_dir) #", "+ \" in SQLite.\") cursor.execute( \"UPDATE images SET package_name =", "source directory.\") print(e) # record the tarball package name for", "to \" + tarball_name + \" in SQLite.\") cursor.execute( \"UPDATE", "os.path.join( abs_path, \"deplatformr_open_images_workflow.sqlite\") workflow_db = sqlite3.connect(db_path) cursor = workflow_db.cursor() for", "a package.\") print(e) return() # Convert batch directory into a", "format.', 'External-Identifier': external_identifier, 'License': 'https://creativecommons.org/licenses/by/2.0/'}, checksums=[\"sha512\"]) print(\"Created a Bagit directory.\")", "move all related files for the last image that's getting", "(tarball_name, tarball_size, utctime,),) workflow_db.commit() workflow_db.close() except Exception as e: print(\"Unable", "tarfile from datetime import datetime import bagit def create_package(images, batch_dir):", "tarfile.open(os.path.join( packages_dir, tarball_name), \"w\") tarball.add(batch_dir, arcname=external_identifier) tarball.close() print(\"Created tarball \"", "the workflow dbase utctime = datetime.utcnow() tarball_size = os.path.getsize( os.path.join(packages_dir,", "split = os.path.split(batch_dir) new_dir_number = int(split[1]) + 1 new_batch_dir =", "except OSError as e: print(\"Unable to delete the source directory.\")", "if file.find(last_image[0]) != -1: filepath = os.path.join(path, file) shutil.move(filepath, os.path.join(", "dirs, files = next(os.walk(batch_dir)) for file in files: if file.find(last_image[0])", "import sqlite3 import tarfile from datetime import datetime import bagit", "print(\"Deleted the batch source directory.\") except OSError as e: print(\"Unable", "= ?\", (tarball_name, image[0],),) images_db.commit() images_db.close() # add tarball name,", "for image in images: package_size += image[1] print(\"Total batch size:", "tarball_name = external_identifier + \".tar\" tarball = tarfile.open(os.path.join( packages_dir, tarball_name),", "Convert batch directory into a Bagit directory external_identifier = \"deplatformr-open-images-\"", "package_name = ? WHERE ImageID = ?\", (tarball_name, image[0],),) images_db.commit()", "threshold images.pop(-1) except Exception as e: print(\"Unable to separate batch", "db_path = os.path.join( abs_path, \"source_data/deplatformr_open_images_v6.sqlite\") images_db = sqlite3.connect(db_path) cursor =", "except Exception as e: print(\"Unable to separate batch to make", "abs_path, \"source_data/deplatformr_open_images_v6.sqlite\") images_db = sqlite3.connect(db_path) cursor = images_db.cursor() for image", "that's getting removed from batch to keep within threshold last_image", "cursor.execute( \"UPDATE images SET package_name = ? 
WHERE image_id =", "suffixIndex < 4: suffixIndex += 1 # increment the index", "size = size / 1024.0 # apply the division return", "# move all related files for the last image that's", "print(\"Created a Bagit directory.\") try: # Create the tar package", "1 # increment the index of the suffix size =", "ImageID = ?\", (tarball_name, image[0],),) images_db.commit() images_db.close() # add tarball", "in files: if file.find(last_image[0]) != -1: filepath = os.path.join(path, file)", "= images[-1] path, dirs, files = next(os.walk(batch_dir)) for file in", "images: print(\"Linking image \" + image[0] + \" to \"", "yet to make a package from this batch.\") return() else:", "list (convert tuple) to get the package size back under", "800 Mib to the next power of 2 = 1GiB", "os.path.split(batch_dir) new_dir_number = int(split[1]) + 1 new_batch_dir = os.path.join(split[0], str(new_dir_number))", "a package for batch directory \" + batch_dir) print(e) def", "WHERE ImageID = ?\", (tarball_name, image[0],),) images_db.commit() images_db.close() # add", "(tarball_name, image[0],),) cursor.execute(\"INSERT INTO packages (name, size, timestamp) VALUES (?,?,?)\",", "images_db = sqlite3.connect(db_path) cursor = images_db.cursor() for image in images:", "image db_path = os.path.join( abs_path, \"source_data/deplatformr_open_images_v6.sqlite\") images_db = sqlite3.connect(db_path) cursor", "= ?\", (tarball_name, image[0],),) cursor.execute(\"INSERT INTO packages (name, size, timestamp)", "newly generated sha512 checksums. This content and context is described", "< package_threshold: print(\"Not enough images yet to make a package", "the tar package packages_dir = os.path.join( os.getcwd(), \"source_data/packages/\") tarball_name =", "package_threshold: print(\"Not enough images yet to make a package from", "this batch.\") return() else: try: # create new batch directory", "= datetime.utcnow() tarball_size = os.path.getsize( os.path.join(packages_dir, tarball_name)) print(\"Tarball size is:", "+ image[0] + \" to \" + tarball_name + \"", "1024 and suffixIndex < 4: suffixIndex += 1 # increment", "+ get_human_readable_file_size(tarball_size)) db_path = os.path.join( abs_path, \"deplatformr_open_images_workflow.sqlite\") workflow_db = sqlite3.connect(db_path)", "-1: filepath = os.path.join(path, file) shutil.move(filepath, os.path.join( new_batch_dir, file)) #", "batch directory split = os.path.split(batch_dir) new_dir_number = int(split[1]) + 1", "files for the last image that's getting removed from batch", "get_human_readable_file_size(tarball_size)) db_path = os.path.join( abs_path, \"deplatformr_open_images_workflow.sqlite\") workflow_db = sqlite3.connect(db_path) cursor" ]
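A minimal invocation sketch, assuming (this is not stated in the source) that images is a list of (ImageID, size_in_bytes) tuples, as implied by the image[0] / image[1] accesses above, and that batch_dir is a numbered batch folder; the path and IDs below are illustrative placeholders only.

# Hypothetical call; the batch path and image tuples are made-up examples.
if __name__ == "__main__":
    batch = [
        ("0001eeaf4aed83f9", 734003),    # (ImageID, file size in bytes)
        ("000a1249af2bc5f0", 1048576),
    ]
    create_package(batch, "source_data/batches/12")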
[ "'MYH9', 'MYO15A', 'MYO3A', 'MYO6', 'MYO7A', 'OSBPL2', 'OTOA', 'OTOF', 'OTOG', 'OTOGL',", "'RIPOR2', 'S1PR2', 'SERPINB6', 'SIX1', 'SLC17A8', 'SLC26A4', 'SLC52A2', 'SLITRK6', 'SMPX', 'SOX10',", "'COCH', 'COL11A2', 'DIAPH1', 'DIAPH3', 'DMXL2', 'DNMT1', 'DSPP', 'EDN3', 'EDNRB', 'EPS8',", "'SOX10', 'STRC', 'SYNE4', 'TBC1D24', 'TECTA', 'TIMM8A', 'TMC1', 'TMIE', 'TMPRSS3', 'TPRN',", "'CDH23', 'CEACAM16', 'CEP78', 'CHD7', 'CIB2', 'CISD2', 'CLDN14', 'CLIC5', 'CLPP', 'CLRN1',", "'EYA4', 'GIPC3', 'GJB2', 'GJB6', 'GPSM2', 'GRHL2', 'GRXCR1', 'GSDME', 'HGF', 'HSD17B4',", "'HGF', 'HSD17B4', 'ILDR1', 'KCNE1', 'KCNQ1', 'KCNQ4', 'LARS2', 'LHFPL5', 'LOXHD1', 'LRTOMT',", "'OTOG', 'OTOGL', 'P2RX2', 'PAX3', 'PDZD7', 'PJVK', 'POU3F4', 'POU4F3', 'PRPS1', 'PTPRQ',", "'CLPP', 'CLRN1', 'COCH', 'COL11A2', 'DIAPH1', 'DIAPH3', 'DMXL2', 'DNMT1', 'DSPP', 'EDN3',", "'ABHD12', 'ACTG1', 'ADGRV1', 'AIFM1', 'ATP6V1B1', 'BCS1L', 'BSND', 'CABP2', 'CACNA1D', 'CDC14A',", "'MITF', 'MSRB3', 'MT-RNR1', 'MT-TS1', 'MYH14', 'MYH9', 'MYO15A', 'MYO3A', 'MYO6', 'MYO7A',", "'GRXCR1', 'GSDME', 'HGF', 'HSD17B4', 'ILDR1', 'KCNE1', 'KCNQ1', 'KCNQ4', 'LARS2', 'LHFPL5',", "'STRC', 'SYNE4', 'TBC1D24', 'TECTA', 'TIMM8A', 'TMC1', 'TMIE', 'TMPRSS3', 'TPRN', 'TRIOBP',", "'TIMM8A', 'TMC1', 'TMIE', 'TMPRSS3', 'TPRN', 'TRIOBP', 'TUBB4B', 'USH1C', 'USH1G', 'USH2A',", "'SLC26A4', 'SLC52A2', 'SLITRK6', 'SMPX', 'SOX10', 'STRC', 'SYNE4', 'TBC1D24', 'TECTA', 'TIMM8A',", "evalRec(env, rec): \"\"\"hl_reportable\"\"\" return (len(set(rec.Genes) & { 'ABHD12', 'ACTG1', 'ADGRV1',", "\"\"\"hl_reportable\"\"\" return (len(set(rec.Genes) & { 'ABHD12', 'ACTG1', 'ADGRV1', 'AIFM1', 'ATP6V1B1',", "'PJVK', 'POU3F4', 'POU4F3', 'PRPS1', 'PTPRQ', 'RDX', 'RIPOR2', 'S1PR2', 'SERPINB6', 'SIX1',", "'MYO3A', 'MYO6', 'MYO7A', 'OSBPL2', 'OTOA', 'OTOF', 'OTOG', 'OTOGL', 'P2RX2', 'PAX3',", "'OTOA', 'OTOF', 'OTOG', 'OTOGL', 'P2RX2', 'PAX3', 'PDZD7', 'PJVK', 'POU3F4', 'POU4F3',", "'SERPINB6', 'SIX1', 'SLC17A8', 'SLC26A4', 'SLC52A2', 'SLITRK6', 'SMPX', 'SOX10', 'STRC', 'SYNE4',", "'TMC1', 'TMIE', 'TMPRSS3', 'TPRN', 'TRIOBP', 'TUBB4B', 'USH1C', 'USH1G', 'USH2A', 'WFS1',", "return (len(set(rec.Genes) & { 'ABHD12', 'ACTG1', 'ADGRV1', 'AIFM1', 'ATP6V1B1', 'BCS1L',", "{ 'ABHD12', 'ACTG1', 'ADGRV1', 'AIFM1', 'ATP6V1B1', 'BCS1L', 'BSND', 'CABP2', 'CACNA1D',", "'ILDR1', 'KCNE1', 'KCNQ1', 'KCNQ4', 'LARS2', 'LHFPL5', 'LOXHD1', 'LRTOMT', 'MARVELD2', 'MIR96',", "'COL11A2', 'DIAPH1', 'DIAPH3', 'DMXL2', 'DNMT1', 'DSPP', 'EDN3', 'EDNRB', 'EPS8', 'EPS8L2',", "'GPSM2', 'GRHL2', 'GRXCR1', 'GSDME', 'HGF', 'HSD17B4', 'ILDR1', 'KCNE1', 'KCNQ1', 'KCNQ4',", "'CEP78', 'CHD7', 'CIB2', 'CISD2', 'CLDN14', 'CLIC5', 'CLPP', 'CLRN1', 'COCH', 'COL11A2',", "'LARS2', 'LHFPL5', 'LOXHD1', 'LRTOMT', 'MARVELD2', 'MIR96', 'MITF', 'MSRB3', 'MT-RNR1', 'MT-TS1',", "'CDC14A', 'CDH23', 'CEACAM16', 'CEP78', 'CHD7', 'CIB2', 'CISD2', 'CLDN14', 'CLIC5', 'CLPP',", "'OTOGL', 'P2RX2', 'PAX3', 'PDZD7', 'PJVK', 'POU3F4', 'POU4F3', 'PRPS1', 'PTPRQ', 'RDX',", "'PTPRQ', 'RDX', 'RIPOR2', 'S1PR2', 'SERPINB6', 'SIX1', 'SLC17A8', 'SLC26A4', 'SLC52A2', 'SLITRK6',", "'S1PR2', 'SERPINB6', 'SIX1', 'SLC17A8', 'SLC26A4', 'SLC52A2', 'SLITRK6', 'SMPX', 'SOX10', 'STRC',", "'TMIE', 'TMPRSS3', 'TPRN', 'TRIOBP', 'TUBB4B', 'USH1C', 'USH1G', 'USH2A', 'WFS1', 'WHRN',", "'SLC17A8', 'SLC26A4', 'SLC52A2', 'SLITRK6', 'SMPX', 'SOX10', 'STRC', 'SYNE4', 'TBC1D24', 'TECTA',", "'KCNE1', 'KCNQ1', 'KCNQ4', 'LARS2', 'LHFPL5', 'LOXHD1', 'LRTOMT', 'MARVELD2', 'MIR96', 'MITF',", "'GSDME', 'HGF', 'HSD17B4', 'ILDR1', 'KCNE1', 'KCNQ1', 'KCNQ4', 'LARS2', 'LHFPL5', 'LOXHD1',", 
"'TPRN', 'TRIOBP', 'TUBB4B', 'USH1C', 'USH1G', 'USH2A', 'WFS1', 'WHRN', } )", "'DNMT1', 'DSPP', 'EDN3', 'EDNRB', 'EPS8', 'EPS8L2', 'ESPN', 'ESRRB', 'EYA1', 'EYA4',", "'CEACAM16', 'CEP78', 'CHD7', 'CIB2', 'CISD2', 'CLDN14', 'CLIC5', 'CLPP', 'CLRN1', 'COCH',", "'DMXL2', 'DNMT1', 'DSPP', 'EDN3', 'EDNRB', 'EPS8', 'EPS8L2', 'ESPN', 'ESRRB', 'EYA1',", "'BSND', 'CABP2', 'CACNA1D', 'CDC14A', 'CDH23', 'CEACAM16', 'CEP78', 'CHD7', 'CIB2', 'CISD2',", "'MT-RNR1', 'MT-TS1', 'MYH14', 'MYH9', 'MYO15A', 'MYO3A', 'MYO6', 'MYO7A', 'OSBPL2', 'OTOA',", "'EDNRB', 'EPS8', 'EPS8L2', 'ESPN', 'ESRRB', 'EYA1', 'EYA4', 'GIPC3', 'GJB2', 'GJB6',", "'KCNQ1', 'KCNQ4', 'LARS2', 'LHFPL5', 'LOXHD1', 'LRTOMT', 'MARVELD2', 'MIR96', 'MITF', 'MSRB3',", "'BCS1L', 'BSND', 'CABP2', 'CACNA1D', 'CDC14A', 'CDH23', 'CEACAM16', 'CEP78', 'CHD7', 'CIB2',", "'CLIC5', 'CLPP', 'CLRN1', 'COCH', 'COL11A2', 'DIAPH1', 'DIAPH3', 'DMXL2', 'DNMT1', 'DSPP',", "'LOXHD1', 'LRTOMT', 'MARVELD2', 'MIR96', 'MITF', 'MSRB3', 'MT-RNR1', 'MT-TS1', 'MYH14', 'MYH9',", "'MSRB3', 'MT-RNR1', 'MT-TS1', 'MYH14', 'MYH9', 'MYO15A', 'MYO3A', 'MYO6', 'MYO7A', 'OSBPL2',", "'MARVELD2', 'MIR96', 'MITF', 'MSRB3', 'MT-RNR1', 'MT-TS1', 'MYH14', 'MYH9', 'MYO15A', 'MYO3A',", "'CHD7', 'CIB2', 'CISD2', 'CLDN14', 'CLIC5', 'CLPP', 'CLRN1', 'COCH', 'COL11A2', 'DIAPH1',", "'CACNA1D', 'CDC14A', 'CDH23', 'CEACAM16', 'CEP78', 'CHD7', 'CIB2', 'CISD2', 'CLDN14', 'CLIC5',", "'DIAPH1', 'DIAPH3', 'DMXL2', 'DNMT1', 'DSPP', 'EDN3', 'EDNRB', 'EPS8', 'EPS8L2', 'ESPN',", "'DSPP', 'EDN3', 'EDNRB', 'EPS8', 'EPS8L2', 'ESPN', 'ESRRB', 'EYA1', 'EYA4', 'GIPC3',", "'MYO6', 'MYO7A', 'OSBPL2', 'OTOA', 'OTOF', 'OTOG', 'OTOGL', 'P2RX2', 'PAX3', 'PDZD7',", "'ADGRV1', 'AIFM1', 'ATP6V1B1', 'BCS1L', 'BSND', 'CABP2', 'CACNA1D', 'CDC14A', 'CDH23', 'CEACAM16',", "'LRTOMT', 'MARVELD2', 'MIR96', 'MITF', 'MSRB3', 'MT-RNR1', 'MT-TS1', 'MYH14', 'MYH9', 'MYO15A',", "'MIR96', 'MITF', 'MSRB3', 'MT-RNR1', 'MT-TS1', 'MYH14', 'MYH9', 'MYO15A', 'MYO3A', 'MYO6',", "'ESRRB', 'EYA1', 'EYA4', 'GIPC3', 'GJB2', 'GJB6', 'GPSM2', 'GRHL2', 'GRXCR1', 'GSDME',", "'HSD17B4', 'ILDR1', 'KCNE1', 'KCNQ1', 'KCNQ4', 'LARS2', 'LHFPL5', 'LOXHD1', 'LRTOMT', 'MARVELD2',", "'GJB2', 'GJB6', 'GPSM2', 'GRHL2', 'GRXCR1', 'GSDME', 'HGF', 'HSD17B4', 'ILDR1', 'KCNE1',", "'GIPC3', 'GJB2', 'GJB6', 'GPSM2', 'GRHL2', 'GRXCR1', 'GSDME', 'HGF', 'HSD17B4', 'ILDR1',", "'GRHL2', 'GRXCR1', 'GSDME', 'HGF', 'HSD17B4', 'ILDR1', 'KCNE1', 'KCNQ1', 'KCNQ4', 'LARS2',", "'OSBPL2', 'OTOA', 'OTOF', 'OTOG', 'OTOGL', 'P2RX2', 'PAX3', 'PDZD7', 'PJVK', 'POU3F4',", "'TUBB4B', 'USH1C', 'USH1G', 'USH2A', 'WFS1', 'WHRN', } ) > 0)", "'MT-TS1', 'MYH14', 'MYH9', 'MYO15A', 'MYO3A', 'MYO6', 'MYO7A', 'OSBPL2', 'OTOA', 'OTOF',", "'P2RX2', 'PAX3', 'PDZD7', 'PJVK', 'POU3F4', 'POU4F3', 'PRPS1', 'PTPRQ', 'RDX', 'RIPOR2',", "'CABP2', 'CACNA1D', 'CDC14A', 'CDH23', 'CEACAM16', 'CEP78', 'CHD7', 'CIB2', 'CISD2', 'CLDN14',", "'EPS8', 'EPS8L2', 'ESPN', 'ESRRB', 'EYA1', 'EYA4', 'GIPC3', 'GJB2', 'GJB6', 'GPSM2',", "'POU3F4', 'POU4F3', 'PRPS1', 'PTPRQ', 'RDX', 'RIPOR2', 'S1PR2', 'SERPINB6', 'SIX1', 'SLC17A8',", "'MYO7A', 'OSBPL2', 'OTOA', 'OTOF', 'OTOG', 'OTOGL', 'P2RX2', 'PAX3', 'PDZD7', 'PJVK',", "'CISD2', 'CLDN14', 'CLIC5', 'CLPP', 'CLRN1', 'COCH', 'COL11A2', 'DIAPH1', 'DIAPH3', 'DMXL2',", "'ATP6V1B1', 'BCS1L', 'BSND', 'CABP2', 'CACNA1D', 'CDC14A', 'CDH23', 'CEACAM16', 'CEP78', 'CHD7',", "'CLRN1', 'COCH', 'COL11A2', 'DIAPH1', 'DIAPH3', 'DMXL2', 'DNMT1', 'DSPP', 'EDN3', 'EDNRB',", "'PDZD7', 'PJVK', 'POU3F4', 'POU4F3', 'PRPS1', 'PTPRQ', 'RDX', 'RIPOR2', 'S1PR2', 'SERPINB6',", "'TBC1D24', 'TECTA', 
'TIMM8A', 'TMC1', 'TMIE', 'TMPRSS3', 'TPRN', 'TRIOBP', 'TUBB4B', 'USH1C',", "'TECTA', 'TIMM8A', 'TMC1', 'TMIE', 'TMPRSS3', 'TPRN', 'TRIOBP', 'TUBB4B', 'USH1C', 'USH1G',", "'ESPN', 'ESRRB', 'EYA1', 'EYA4', 'GIPC3', 'GJB2', 'GJB6', 'GPSM2', 'GRHL2', 'GRXCR1',", "'CLDN14', 'CLIC5', 'CLPP', 'CLRN1', 'COCH', 'COL11A2', 'DIAPH1', 'DIAPH3', 'DMXL2', 'DNMT1',", "'OTOF', 'OTOG', 'OTOGL', 'P2RX2', 'PAX3', 'PDZD7', 'PJVK', 'POU3F4', 'POU4F3', 'PRPS1',", "def evalRec(env, rec): \"\"\"hl_reportable\"\"\" return (len(set(rec.Genes) & { 'ABHD12', 'ACTG1',", "& { 'ABHD12', 'ACTG1', 'ADGRV1', 'AIFM1', 'ATP6V1B1', 'BCS1L', 'BSND', 'CABP2',", "'CIB2', 'CISD2', 'CLDN14', 'CLIC5', 'CLPP', 'CLRN1', 'COCH', 'COL11A2', 'DIAPH1', 'DIAPH3',", "'MYH14', 'MYH9', 'MYO15A', 'MYO3A', 'MYO6', 'MYO7A', 'OSBPL2', 'OTOA', 'OTOF', 'OTOG',", "'SIX1', 'SLC17A8', 'SLC26A4', 'SLC52A2', 'SLITRK6', 'SMPX', 'SOX10', 'STRC', 'SYNE4', 'TBC1D24',", "'TMPRSS3', 'TPRN', 'TRIOBP', 'TUBB4B', 'USH1C', 'USH1G', 'USH2A', 'WFS1', 'WHRN', }", "'MYO15A', 'MYO3A', 'MYO6', 'MYO7A', 'OSBPL2', 'OTOA', 'OTOF', 'OTOG', 'OTOGL', 'P2RX2',", "'EPS8L2', 'ESPN', 'ESRRB', 'EYA1', 'EYA4', 'GIPC3', 'GJB2', 'GJB6', 'GPSM2', 'GRHL2',", "'GJB6', 'GPSM2', 'GRHL2', 'GRXCR1', 'GSDME', 'HGF', 'HSD17B4', 'ILDR1', 'KCNE1', 'KCNQ1',", "'SLITRK6', 'SMPX', 'SOX10', 'STRC', 'SYNE4', 'TBC1D24', 'TECTA', 'TIMM8A', 'TMC1', 'TMIE',", "'DIAPH3', 'DMXL2', 'DNMT1', 'DSPP', 'EDN3', 'EDNRB', 'EPS8', 'EPS8L2', 'ESPN', 'ESRRB',", "'PAX3', 'PDZD7', 'PJVK', 'POU3F4', 'POU4F3', 'PRPS1', 'PTPRQ', 'RDX', 'RIPOR2', 'S1PR2',", "(len(set(rec.Genes) & { 'ABHD12', 'ACTG1', 'ADGRV1', 'AIFM1', 'ATP6V1B1', 'BCS1L', 'BSND',", "'LHFPL5', 'LOXHD1', 'LRTOMT', 'MARVELD2', 'MIR96', 'MITF', 'MSRB3', 'MT-RNR1', 'MT-TS1', 'MYH14',", "'EYA1', 'EYA4', 'GIPC3', 'GJB2', 'GJB6', 'GPSM2', 'GRHL2', 'GRXCR1', 'GSDME', 'HGF',", "'KCNQ4', 'LARS2', 'LHFPL5', 'LOXHD1', 'LRTOMT', 'MARVELD2', 'MIR96', 'MITF', 'MSRB3', 'MT-RNR1',", "rec): \"\"\"hl_reportable\"\"\" return (len(set(rec.Genes) & { 'ABHD12', 'ACTG1', 'ADGRV1', 'AIFM1',", "'RDX', 'RIPOR2', 'S1PR2', 'SERPINB6', 'SIX1', 'SLC17A8', 'SLC26A4', 'SLC52A2', 'SLITRK6', 'SMPX',", "'AIFM1', 'ATP6V1B1', 'BCS1L', 'BSND', 'CABP2', 'CACNA1D', 'CDC14A', 'CDH23', 'CEACAM16', 'CEP78',", "'ACTG1', 'ADGRV1', 'AIFM1', 'ATP6V1B1', 'BCS1L', 'BSND', 'CABP2', 'CACNA1D', 'CDC14A', 'CDH23',", "'TRIOBP', 'TUBB4B', 'USH1C', 'USH1G', 'USH2A', 'WFS1', 'WHRN', } ) >", "'SYNE4', 'TBC1D24', 'TECTA', 'TIMM8A', 'TMC1', 'TMIE', 'TMPRSS3', 'TPRN', 'TRIOBP', 'TUBB4B',", "'EDN3', 'EDNRB', 'EPS8', 'EPS8L2', 'ESPN', 'ESRRB', 'EYA1', 'EYA4', 'GIPC3', 'GJB2',", "'SMPX', 'SOX10', 'STRC', 'SYNE4', 'TBC1D24', 'TECTA', 'TIMM8A', 'TMC1', 'TMIE', 'TMPRSS3',", "'PRPS1', 'PTPRQ', 'RDX', 'RIPOR2', 'S1PR2', 'SERPINB6', 'SIX1', 'SLC17A8', 'SLC26A4', 'SLC52A2',", "'SLC52A2', 'SLITRK6', 'SMPX', 'SOX10', 'STRC', 'SYNE4', 'TBC1D24', 'TECTA', 'TIMM8A', 'TMC1',", "'POU4F3', 'PRPS1', 'PTPRQ', 'RDX', 'RIPOR2', 'S1PR2', 'SERPINB6', 'SIX1', 'SLC17A8', 'SLC26A4'," ]
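A quick check of the panel filter, using a stand-in record object; the real env and rec types come from the host annotation pipeline and are an assumption here.

# Hypothetical record type for illustration; only the Genes attribute is used by evalRec.
class FakeRec:
    Genes = ["GJB2", "TP53"]

print(evalRec(None, FakeRec()))  # True, because GJB2 is on the hearing-loss panel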
[ "interpolation because it could be too optimistic measures[\"auc_prc_weighted\"] = multi_class_prc_auc_score(labels,", "label_binarizer = sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label) label = label_binarizer.transform(label) predict = label_binarizer.transform(predict)", "if tp + fp == 0: pass else: # False", "return measures def calculate_cm_states(labels, predictions, pos_class, neg_class): tp = 0", "fp, tn, fn def save_classification_report(labels, predictions): return sklearn.metrics.classification_report(y_true=labels, y_pred=predictions, output_dict=True)", "1.0 else: labels[index] = 0.0 return labels def save_confusion_matrix(labels, predictions,", "average='weighted'), \"recall_micro\": sklearn.metrics.recall_score(labels, predictions, average='micro'), \"recall_macro\": sklearn.metrics.recall_score(labels, predictions, average='macro'), \"recall_weighted\":", "False discovery rate measures[str(pos_class) + \"_fdr\"] = fp / (tp", "# Negative predictive value measures[str(pos_class) + \"_npv\"] = tn /", "fn = calculate_cm_states(labels, predictions, pos_class, neg_class) measures[str(pos_class) + \"_tp\"] =", "print(\"Warning: Auc prc score can not be calculated ...\") save_confusion_matrix(labels,", "return str(n) \"\"\" return str(n) cm = reduce(lambda x, y:", "the trapezoidal rule / linear interpolation because it could be", "label = label_binarizer.transform(label) predict = label_binarizer.transform(predict) return sklearn.metrics.average_precision_score(label, predict, average=average)", "+ \"_support\"] = report[str(pos_class)]['support'] if pos_class == 1: neg_class =", "optimistic measures[\"auc_prc_weighted\"] = multi_class_prc_auc_score(labels, predictions, 'weighted') measures[\"auc_prc_macro\"] = multi_class_prc_auc_score(labels, predictions,", "fp measures[str(pos_class) + \"_tn\"] = tn measures[str(pos_class) + \"_fn\"] =", "pyplot from functools import reduce # import numpy as np", "pos_class, neg_class): tp = 0 fp = 0 tn =", "str(np.round(n / 1000000, 1)) + 'M' elif n > 1000:", "+ fn == 0: pass else: # False negative rate", "except ValueError: print(\"Warning: Auc prc score can not be calculated", "+ \"_tp\"] = tp measures[str(pos_class) + \"_fp\"] = fp measures[str(pos_class)", "predictions, verbose=False): measures = { \"accuracy\": sklearn.metrics.accuracy_score(labels, predictions), \"balanced_accuracy\": sklearn.metrics.balanced_accuracy_score(labels,", "\"_tnr\"] = tn / (tn + fp) # Fall out", "negative rate measures[str(pos_class) + \"_tnr\"] = tn / (tn +", "label = label_binarizer.transform(label) predict = label_binarizer.transform(predict) return sklearn.metrics.roc_auc_score(label, predict, average=average)", "output_dict=True) def multi_class_roc_auc_score(label, predict, average): label_binarizer = sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label) label", "metrics_from_prediction_and_label(labels, predictions, verbose=False): measures = { \"accuracy\": sklearn.metrics.accuracy_score(labels, predictions), \"balanced_accuracy\":", "= 0 tn = 0 fn = 0 for i", "1 if labels[i] == predictions[i] == neg_class: tn += 1", "\"f1_score_weighted\": sklearn.metrics.f1_score(labels, predictions, average='weighted') } try: measures[\"roc_auc_weighted\"] = multi_class_roc_auc_score(labels, predictions,", "= pyplot.subplots(figsize=(7, 4.5)) g = sns.heatmap(cm, annot=annot, fmt='', cmap='Blues', cbar=False,", "import pandas import seaborn as sns import matplotlib.pyplot as pyplot", "print(\"Warning: Roc auc score can not be calculated 
...\") try:", "for pos_class in classes: measures[str(pos_class) + \"_precision\"] = report[str(pos_class)]['precision'] measures[str(pos_class)", "1)) + 'M' elif n > 1000: return str(np.round(n /", "'weighted') measures[\"auc_prc_macro\"] = multi_class_prc_auc_score(labels, predictions, 'macro') measures[\"auc_prc_micro\"] = multi_class_prc_auc_score(labels, predictions,", "= 0 fp = 0 tn = 0 fn =", "+= 1 if predictions[i] == neg_class and labels[i] != predictions[i]:", "annot = cm.applymap(prettify) cm = (cm.T / cm.sum(axis=1)).T fig, g", "\"precision_macro\": sklearn.metrics.precision_score(labels, predictions, average='macro'), \"precision_weighted\": sklearn.metrics.precision_score(labels, predictions, average='weighted'), \"recall_micro\": sklearn.metrics.recall_score(labels,", "labels def save_confusion_matrix(labels, predictions, path=\"../../../results/cm.pdf\"): classes = sklearn.utils.multiclass.unique_labels(labels, predictions) cms", "tn / (tn + fn) if tp + fn ==", "Roc auc score can not be calculated ...\") try: #", "'macro') measures[\"auc_prc_micro\"] = multi_class_prc_auc_score(labels, predictions, 'micro') except ValueError: print(\"Warning: Auc", "tp = 0 fp = 0 tn = 0 fn", "...\") save_confusion_matrix(labels, predictions) report = save_classification_report(labels, predictions) classes = list(sorted(set(labels)))", "/ (tn + fn) if tp + fn == 0:", "pass else: # Negative predictive value measures[str(pos_class) + \"_npv\"] =", "predictions), \"precision_micro\": sklearn.metrics.precision_score(labels, predictions, average='micro'), \"precision_macro\": sklearn.metrics.precision_score(labels, predictions, average='macro'), \"precision_weighted\":", "neg_class: tn += 1 if predictions[i] == neg_class and labels[i]", "if predictions[i] == pos_class and labels[i] != predictions[i]: fp +=", "sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label) label = label_binarizer.transform(label) predict = label_binarizer.transform(predict) return sklearn.metrics.roc_auc_score(label,", "= label_binarizer.transform(label) predict = label_binarizer.transform(predict) return sklearn.metrics.roc_auc_score(label, predict, average=average) def", "it could be too optimistic measures[\"auc_prc_weighted\"] = multi_class_prc_auc_score(labels, predictions, 'weighted')", "4.5)) g = sns.heatmap(cm, annot=annot, fmt='', cmap='Blues', cbar=False, rasterized=True, linewidths=0.1)", "= 0 fn = 0 for i in range(len(predictions)): if", "return sklearn.metrics.classification_report(y_true=labels, y_pred=predictions, output_dict=True) def multi_class_roc_auc_score(label, predict, average): label_binarizer =", "labels[index] >= 0.5: labels[index] = 1.0 else: labels[index] = 0.0", "tn, fn def save_classification_report(labels, predictions): return sklearn.metrics.classification_report(y_true=labels, y_pred=predictions, output_dict=True) def", "0 fn = 0 for i in range(len(predictions)): if labels[i]", "measures[\"auc_prc_micro\"] = multi_class_prc_auc_score(labels, predictions, 'micro') except ValueError: print(\"Warning: Auc prc", "pos_class: tp += 1 if predictions[i] == pos_class and labels[i]", "== pos_class: tp += 1 if predictions[i] == pos_class and", "\"precision_micro\": sklearn.metrics.precision_score(labels, predictions, average='micro'), \"precision_macro\": sklearn.metrics.precision_score(labels, predictions, average='macro'), \"precision_weighted\": sklearn.metrics.precision_score(labels,", "from functools import reduce # import numpy as np def", "return 
sklearn.metrics.average_precision_score(label, predict, average=average) def label_binarizer(labels): for index in range(0,", "sklearn.metrics.f1_score(labels, predictions, average='weighted') } try: measures[\"roc_auc_weighted\"] = multi_class_roc_auc_score(labels, predictions, 'weighted')", "ValueError: print(\"Warning: Roc auc score can not be calculated ...\")", "+ \"_fpr\"] = fp / (fp + tn) if tn", "# False discovery rate measures[str(pos_class) + \"_fdr\"] = fp /", "fp += 1 if labels[i] == predictions[i] == neg_class: tn", "average precision at different threshold values as the auc of", "== neg_class and labels[i] != predictions[i]: fn += 1 return", "label_binarizer(labels): for index in range(0, len(labels)): if labels[index] >= 0.5:", "== 1: neg_class = 0 else: neg_class = 1 tp,", "def save_classification_report(labels, predictions): return sklearn.metrics.classification_report(y_true=labels, y_pred=predictions, output_dict=True) def multi_class_roc_auc_score(label, predict,", "reduce # import numpy as np def metrics_from_prediction_and_label(labels, predictions, verbose=False):", "fp / (fp + tn) if tn + fn ==", "= reduce(lambda x, y: x.add(y, fill_value=0), cms) annot = cm.applymap(prettify)", "predictive value measures[str(pos_class) + \"_npv\"] = tn / (tn +", "import seaborn as sns import matplotlib.pyplot as pyplot from functools", "measures[str(pos_class) + \"_tn\"] = tn measures[str(pos_class) + \"_fn\"] = fn", "the auc-pr-curve with the trapezoidal rule / linear interpolation because", "sklearn.metrics.precision_score(labels, predictions, average='micro'), \"precision_macro\": sklearn.metrics.precision_score(labels, predictions, average='macro'), \"precision_weighted\": sklearn.metrics.precision_score(labels, predictions,", "0: pass else: # False negative rate measures[str(pos_class) + \"_fnr\"]", "= multi_class_roc_auc_score(labels, predictions, 'micro') except ValueError: print(\"Warning: Roc auc score", "!= predictions[i]: fp += 1 if labels[i] == predictions[i] ==", "fn = 0 for i in range(len(predictions)): if labels[i] ==", "annot=annot, fmt='', cmap='Blues', cbar=False, rasterized=True, linewidths=0.1) _ = g.set(ylabel='Actual', xlabel='Prediction')", "def prettify(n): \"\"\" if n > 1000000: return str(np.round(n /", "can not be calculated ...\") try: # note we use", "pass else: # Specificity or true negative rate measures[str(pos_class) +", "save_confusion_matrix(labels, predictions, path=\"../../../results/cm.pdf\"): classes = sklearn.utils.multiclass.unique_labels(labels, predictions) cms = []", "average='weighted') } try: measures[\"roc_auc_weighted\"] = multi_class_roc_auc_score(labels, predictions, 'weighted') measures[\"roc_auc_macro\"] =", "tn / (tn + fp) # Fall out or false", "0 else: neg_class = 1 tp, fp, tn, fn =", "if labels[index] >= 0.5: labels[index] = 1.0 else: labels[index] =", "tp += 1 if predictions[i] == pos_class and labels[i] !=", "fn == 0: pass else: # Negative predictive value measures[str(pos_class)", "len(labels)): if labels[index] >= 0.5: labels[index] = 1.0 else: labels[index]", "predictions, pos_class, neg_class) measures[str(pos_class) + \"_tp\"] = tp measures[str(pos_class) +", "rate measures[str(pos_class) + \"_tnr\"] = tn / (tn + fp)", "0: pass else: # False discovery rate measures[str(pos_class) + \"_fdr\"]", "label_binarizer.transform(predict) return sklearn.metrics.average_precision_score(label, predict, average=average) def label_binarizer(labels): for index in", "\"\"\" if n > 1000000: return str(np.round(n / 1000000, 1))", 
"predictions[i]: fp += 1 if labels[i] == predictions[i] == neg_class:", "(tn + fp) # Fall out or false positive rate", "'K' else: return str(n) \"\"\" return str(n) cm = reduce(lambda", "1 tp, fp, tn, fn = calculate_cm_states(labels, predictions, pos_class, neg_class)", "if pos_class == 1: neg_class = 0 else: neg_class =", "tn + fn == 0: pass else: # Negative predictive", "/ linear interpolation because it could be too optimistic measures[\"auc_prc_weighted\"]", "out or false positive rate measures[str(pos_class) + \"_fpr\"] = fp", "0.0 return labels def save_confusion_matrix(labels, predictions, path=\"../../../results/cm.pdf\"): classes = sklearn.utils.multiclass.unique_labels(labels,", "= fn if tn + fp == 0: pass else:", "return str(np.round(n / 1000000, 1)) + 'M' elif n >", "+ \"_fn\"] = fn if tn + fp == 0:", "g = sns.heatmap(cm, annot=annot, fmt='', cmap='Blues', cbar=False, rasterized=True, linewidths=0.1) _", "= multi_class_roc_auc_score(labels, predictions, 'weighted') measures[\"roc_auc_macro\"] = multi_class_roc_auc_score(labels, predictions, 'macro') measures[\"roc_auc_micro\"]", "be too optimistic measures[\"auc_prc_weighted\"] = multi_class_prc_auc_score(labels, predictions, 'weighted') measures[\"auc_prc_macro\"] =", "g = pyplot.subplots(figsize=(7, 4.5)) g = sns.heatmap(cm, annot=annot, fmt='', cmap='Blues',", "cm.applymap(prettify) cm = (cm.T / cm.sum(axis=1)).T fig, g = pyplot.subplots(figsize=(7,", "+ tn) if tn + fn == 0: pass else:", "= report[str(pos_class)]['recall'] measures[str(pos_class) + \"_f1-score\"] = report[str(pos_class)]['f1-score'] measures[str(pos_class) + \"_support\"]", "as pyplot from functools import reduce # import numpy as", "predictions) report = save_classification_report(labels, predictions) classes = list(sorted(set(labels))) for pos_class", "'micro') except ValueError: print(\"Warning: Auc prc score can not be", "!= predictions[i]: fn += 1 return tp, fp, tn, fn", "= multi_class_prc_auc_score(labels, predictions, 'weighted') measures[\"auc_prc_macro\"] = multi_class_prc_auc_score(labels, predictions, 'macro') measures[\"auc_prc_micro\"]", "fn def save_classification_report(labels, predictions): return sklearn.metrics.classification_report(y_true=labels, y_pred=predictions, output_dict=True) def multi_class_roc_auc_score(label,", "= 1.0 else: labels[index] = 0.0 return labels def save_confusion_matrix(labels,", "1 if predictions[i] == neg_class and labels[i] != predictions[i]: fn", "list(sorted(set(labels))) for pos_class in classes: measures[str(pos_class) + \"_precision\"] = report[str(pos_class)]['precision']", "positive rate measures[str(pos_class) + \"_fpr\"] = fp / (fp +", "of the pr-curve # and not the auc-pr-curve with the", "multi_class_roc_auc_score(labels, predictions, 'macro') measures[\"roc_auc_micro\"] = multi_class_roc_auc_score(labels, predictions, 'micro') except ValueError:", "negative rate measures[str(pos_class) + \"_fnr\"] = fn / (tp +", "measures[str(pos_class) + \"_fdr\"] = fp / (tp + fp) return", "pos_class, neg_class) measures[str(pos_class) + \"_tp\"] = tp measures[str(pos_class) + \"_fp\"]", "report[str(pos_class)]['f1-score'] measures[str(pos_class) + \"_support\"] = report[str(pos_class)]['support'] if pos_class == 1:", "/ (tp + fn) if tp + fp == 0:", "discovery rate measures[str(pos_class) + \"_fdr\"] = fp / (tp +", "score can not be calculated ...\") try: # note we", "report[str(pos_class)]['support'] if pos_class == 1: neg_class = 0 else: neg_class", "measures[str(pos_class) + \"_npv\"] = tn / (tn + fn) if", "+ fp) 
return measures def calculate_cm_states(labels, predictions, pos_class, neg_class): tp", "sklearn import pandas import seaborn as sns import matplotlib.pyplot as", "Negative predictive value measures[str(pos_class) + \"_npv\"] = tn / (tn", "numpy as np def metrics_from_prediction_and_label(labels, predictions, verbose=False): measures = {", "\"f1_score_micro\": sklearn.metrics.f1_score(labels, predictions, average='micro'), \"f1_score_macro\": sklearn.metrics.f1_score(labels, predictions, average='macro'), \"f1_score_weighted\": sklearn.metrics.f1_score(labels,", "+ \"_tn\"] = tn measures[str(pos_class) + \"_fn\"] = fn if", "1000, 1)) + 'K' else: return str(n) \"\"\" return str(n)", "predict = label_binarizer.transform(predict) return sklearn.metrics.roc_auc_score(label, predict, average=average) def multi_class_prc_auc_score(label, predict,", "with the trapezoidal rule / linear interpolation because it could", "if tp + fn == 0: pass else: # False", "cmap='Blues', cbar=False, rasterized=True, linewidths=0.1) _ = g.set(ylabel='Actual', xlabel='Prediction') for _,", "return tp, fp, tn, fn def save_classification_report(labels, predictions): return sklearn.metrics.classification_report(y_true=labels,", "rate measures[str(pos_class) + \"_fpr\"] = fp / (fp + tn)", "trapezoidal rule / linear interpolation because it could be too", "np def metrics_from_prediction_and_label(labels, predictions, verbose=False): measures = { \"accuracy\": sklearn.metrics.accuracy_score(labels,", "calculated ...\") save_confusion_matrix(labels, predictions) report = save_classification_report(labels, predictions) classes =", "return str(np.round(n / 1000, 1)) + 'K' else: return str(n)", "} try: measures[\"roc_auc_weighted\"] = multi_class_roc_auc_score(labels, predictions, 'weighted') measures[\"roc_auc_macro\"] = multi_class_roc_auc_score(labels,", "+ fn == 0: pass else: # Negative predictive value", "sklearn.metrics.classification_report(y_true=labels, y_pred=predictions, output_dict=True) def multi_class_roc_auc_score(label, predict, average): label_binarizer = sklearn.preprocessing.LabelBinarizer()", "# note we use the average precision at different threshold", "sklearn.metrics.recall_score(labels, predictions, average='macro'), \"recall_weighted\": sklearn.metrics.recall_score(labels, predictions, average='weighted'), \"f1_score_micro\": sklearn.metrics.f1_score(labels, predictions,", "label_binarizer.transform(label) predict = label_binarizer.transform(predict) return sklearn.metrics.average_precision_score(label, predict, average=average) def label_binarizer(labels):", "predictions, average='macro'), \"recall_weighted\": sklearn.metrics.recall_score(labels, predictions, average='weighted'), \"f1_score_micro\": sklearn.metrics.f1_score(labels, predictions, average='micro'),", "predictions, path=\"../../../results/cm.pdf\"): classes = sklearn.utils.multiclass.unique_labels(labels, predictions) cms = [] cm", "= tn measures[str(pos_class) + \"_fn\"] = fn if tn +", "tp, fp, tn, fn def save_classification_report(labels, predictions): return sklearn.metrics.classification_report(y_true=labels, y_pred=predictions,", "sns import matplotlib.pyplot as pyplot from functools import reduce #", "\"f1_score_macro\": sklearn.metrics.f1_score(labels, predictions, average='macro'), \"f1_score_weighted\": sklearn.metrics.f1_score(labels, predictions, average='weighted') } try:", "auc-pr-curve with the trapezoidal rule / linear interpolation because it", "tp + fp == 0: pass else: # False discovery", "index in range(0, 
len(labels)): if labels[index] >= 0.5: labels[index] =", "measures def calculate_cm_states(labels, predictions, pos_class, neg_class): tp = 0 fp", "try: measures[\"roc_auc_weighted\"] = multi_class_roc_auc_score(labels, predictions, 'weighted') measures[\"roc_auc_macro\"] = multi_class_roc_auc_score(labels, predictions,", "+ \"_f1-score\"] = report[str(pos_class)]['f1-score'] measures[str(pos_class) + \"_support\"] = report[str(pos_class)]['support'] if", "+ \"_recall\"] = report[str(pos_class)]['recall'] measures[str(pos_class) + \"_f1-score\"] = report[str(pos_class)]['f1-score'] measures[str(pos_class)", "(cm.T / cm.sum(axis=1)).T fig, g = pyplot.subplots(figsize=(7, 4.5)) g =", "\"_fnr\"] = fn / (tp + fn) if tp +", "sklearn.metrics.confusion_matrix(labels, predictions) cm_df = pandas.DataFrame(cm, index=classes, columns=classes) cms.append(cm_df) def prettify(n):", "precision at different threshold values as the auc of the", "average='weighted'), \"f1_score_micro\": sklearn.metrics.f1_score(labels, predictions, average='micro'), \"f1_score_macro\": sklearn.metrics.f1_score(labels, predictions, average='macro'), \"f1_score_weighted\":", "multi_class_roc_auc_score(labels, predictions, 'micro') except ValueError: print(\"Warning: Roc auc score can", "predictions, average='weighted'), \"recall_micro\": sklearn.metrics.recall_score(labels, predictions, average='micro'), \"recall_macro\": sklearn.metrics.recall_score(labels, predictions, average='macro'),", "= fp / (fp + tn) if tn + fn", "if n > 1000000: return str(np.round(n / 1000000, 1)) +", "classes = list(sorted(set(labels))) for pos_class in classes: measures[str(pos_class) + \"_precision\"]", "measures[str(pos_class) + \"_fnr\"] = fn / (tp + fn) if", "= report[str(pos_class)]['f1-score'] measures[str(pos_class) + \"_support\"] = report[str(pos_class)]['support'] if pos_class ==", "report[str(pos_class)]['precision'] measures[str(pos_class) + \"_recall\"] = report[str(pos_class)]['recall'] measures[str(pos_class) + \"_f1-score\"] =", "fp == 0: pass else: # Specificity or true negative", "not the auc-pr-curve with the trapezoidal rule / linear interpolation", "= 0.0 return labels def save_confusion_matrix(labels, predictions, path=\"../../../results/cm.pdf\"): classes =", "threshold values as the auc of the pr-curve # and", "measures[str(pos_class) + \"_f1-score\"] = report[str(pos_class)]['f1-score'] measures[str(pos_class) + \"_support\"] = report[str(pos_class)]['support']", "0: pass else: # Negative predictive value measures[str(pos_class) + \"_npv\"]", "neg_class): tp = 0 fp = 0 tn = 0", "use the average precision at different threshold values as the", "y_pred=predictions, output_dict=True) def multi_class_roc_auc_score(label, predict, average): label_binarizer = sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label)", "(fp + tn) if tn + fn == 0: pass", "predictions, average='micro'), \"recall_macro\": sklearn.metrics.recall_score(labels, predictions, average='macro'), \"recall_weighted\": sklearn.metrics.recall_score(labels, predictions, average='weighted'),", "neg_class) measures[str(pos_class) + \"_tp\"] = tp measures[str(pos_class) + \"_fp\"] =", "== predictions[i] == neg_class: tn += 1 if predictions[i] ==", "label_binarizer.fit(label) label = label_binarizer.transform(label) predict = label_binarizer.transform(predict) return sklearn.metrics.average_precision_score(label, predict,", "sklearn.metrics.precision_score(labels, predictions, average='macro'), \"precision_weighted\": 
sklearn.metrics.precision_score(labels, predictions, average='weighted'), \"recall_micro\": sklearn.metrics.recall_score(labels, predictions,", "tn += 1 if predictions[i] == neg_class and labels[i] !=", "predictions[i]: fn += 1 return tp, fp, tn, fn def", "def label_binarizer(labels): for index in range(0, len(labels)): if labels[index] >=", "multi_class_roc_auc_score(labels, predictions, 'weighted') measures[\"roc_auc_macro\"] = multi_class_roc_auc_score(labels, predictions, 'macro') measures[\"roc_auc_micro\"] =", "...\") try: # note we use the average precision at", "index=classes, columns=classes) cms.append(cm_df) def prettify(n): \"\"\" if n > 1000000:", "= cm.applymap(prettify) cm = (cm.T / cm.sum(axis=1)).T fig, g =", "# Specificity or true negative rate measures[str(pos_class) + \"_tnr\"] =", "1 return tp, fp, tn, fn def save_classification_report(labels, predictions): return", "true negative rate measures[str(pos_class) + \"_tnr\"] = tn / (tn", "'M' elif n > 1000: return str(np.round(n / 1000, 1))", "if tn + fn == 0: pass else: # Negative", "xlabel='Prediction') for _, spine in g.spines.items(): spine.set_visible(True) pyplot.xticks(rotation=45) fig.tight_layout() fig.savefig(path)", "measures = { \"accuracy\": sklearn.metrics.accuracy_score(labels, predictions), \"balanced_accuracy\": sklearn.metrics.balanced_accuracy_score(labels, predictions), \"precision_micro\":", "calculated ...\") try: # note we use the average precision", "# Fall out or false positive rate measures[str(pos_class) + \"_fpr\"]", "/ (fp + tn) if tn + fn == 0:", "label_binarizer.transform(label) predict = label_binarizer.transform(predict) return sklearn.metrics.roc_auc_score(label, predict, average=average) def multi_class_prc_auc_score(label,", "> 1000: return str(np.round(n / 1000, 1)) + 'K' else:", "not be calculated ...\") try: # note we use the", "+ 'K' else: return str(n) \"\"\" return str(n) cm =", "label_binarizer.fit(label) label = label_binarizer.transform(label) predict = label_binarizer.transform(predict) return sklearn.metrics.roc_auc_score(label, predict,", "== 0: pass else: # Negative predictive value measures[str(pos_class) +", "matplotlib.pyplot as pyplot from functools import reduce # import numpy", "for i in range(len(predictions)): if labels[i] == predictions[i] == pos_class:", "[] cm = sklearn.metrics.confusion_matrix(labels, predictions) cm_df = pandas.DataFrame(cm, index=classes, columns=classes)", "= fp / (tp + fp) return measures def calculate_cm_states(labels,", "str(n) cm = reduce(lambda x, y: x.add(y, fill_value=0), cms) annot", "measures[str(pos_class) + \"_tnr\"] = tn / (tn + fp) #", "_ = g.set(ylabel='Actual', xlabel='Prediction') for _, spine in g.spines.items(): spine.set_visible(True)", "= report[str(pos_class)]['precision'] measures[str(pos_class) + \"_recall\"] = report[str(pos_class)]['recall'] measures[str(pos_class) + \"_f1-score\"]", "0 for i in range(len(predictions)): if labels[i] == predictions[i] ==", "{ \"accuracy\": sklearn.metrics.accuracy_score(labels, predictions), \"balanced_accuracy\": sklearn.metrics.balanced_accuracy_score(labels, predictions), \"precision_micro\": sklearn.metrics.precision_score(labels, predictions,", "+ \"_npv\"] = tn / (tn + fn) if tp", "measures[str(pos_class) + \"_fn\"] = fn if tn + fp ==", "fn if tn + fp == 0: pass else: #", "pos_class and labels[i] != predictions[i]: fp += 1 if labels[i]", "predict, average=average) def multi_class_prc_auc_score(label, predict, average): label_binarizer = sklearn.preprocessing.LabelBinarizer() 
label_binarizer.fit(label)", "# and not the auc-pr-curve with the trapezoidal rule /", "sklearn.metrics.balanced_accuracy_score(labels, predictions), \"precision_micro\": sklearn.metrics.precision_score(labels, predictions, average='micro'), \"precision_macro\": sklearn.metrics.precision_score(labels, predictions, average='macro'),", "could be too optimistic measures[\"auc_prc_weighted\"] = multi_class_prc_auc_score(labels, predictions, 'weighted') measures[\"auc_prc_macro\"]", "measures[\"auc_prc_macro\"] = multi_class_prc_auc_score(labels, predictions, 'macro') measures[\"auc_prc_micro\"] = multi_class_prc_auc_score(labels, predictions, 'micro')", "and labels[i] != predictions[i]: fp += 1 if labels[i] ==", "= sns.heatmap(cm, annot=annot, fmt='', cmap='Blues', cbar=False, rasterized=True, linewidths=0.1) _ =", "# import numpy as np def metrics_from_prediction_and_label(labels, predictions, verbose=False): measures", "fp / (tp + fp) return measures def calculate_cm_states(labels, predictions,", "import sklearn import pandas import seaborn as sns import matplotlib.pyplot", "different threshold values as the auc of the pr-curve #", "as np def metrics_from_prediction_and_label(labels, predictions, verbose=False): measures = { \"accuracy\":", "1 if predictions[i] == pos_class and labels[i] != predictions[i]: fp", "multi_class_prc_auc_score(labels, predictions, 'weighted') measures[\"auc_prc_macro\"] = multi_class_prc_auc_score(labels, predictions, 'macro') measures[\"auc_prc_micro\"] =", "rule / linear interpolation because it could be too optimistic", "neg_class = 0 else: neg_class = 1 tp, fp, tn,", "\"_recall\"] = report[str(pos_class)]['recall'] measures[str(pos_class) + \"_f1-score\"] = report[str(pos_class)]['f1-score'] measures[str(pos_class) +", "+ fp) # Fall out or false positive rate measures[str(pos_class)", "report[str(pos_class)]['recall'] measures[str(pos_class) + \"_f1-score\"] = report[str(pos_class)]['f1-score'] measures[str(pos_class) + \"_support\"] =", "\"_npv\"] = tn / (tn + fn) if tp +", "> 1000000: return str(np.round(n / 1000000, 1)) + 'M' elif", "\"_f1-score\"] = report[str(pos_class)]['f1-score'] measures[str(pos_class) + \"_support\"] = report[str(pos_class)]['support'] if pos_class", "= label_binarizer.transform(predict) return sklearn.metrics.roc_auc_score(label, predict, average=average) def multi_class_prc_auc_score(label, predict, average):", "sns.heatmap(cm, annot=annot, fmt='', cmap='Blues', cbar=False, rasterized=True, linewidths=0.1) _ = g.set(ylabel='Actual',", "predictions[i] == pos_class and labels[i] != predictions[i]: fp += 1", "average=average) def multi_class_prc_auc_score(label, predict, average): label_binarizer = sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label) label", "labels[i] != predictions[i]: fp += 1 if labels[i] == predictions[i]", "measures[str(pos_class) + \"_precision\"] = report[str(pos_class)]['precision'] measures[str(pos_class) + \"_recall\"] = report[str(pos_class)]['recall']", "path=\"../../../results/cm.pdf\"): classes = sklearn.utils.multiclass.unique_labels(labels, predictions) cms = [] cm =", "\"recall_micro\": sklearn.metrics.recall_score(labels, predictions, average='micro'), \"recall_macro\": sklearn.metrics.recall_score(labels, predictions, average='macro'), \"recall_weighted\": sklearn.metrics.recall_score(labels,", "= report[str(pos_class)]['support'] if pos_class == 1: neg_class = 0 else:", "== 0: pass else: # False negative rate measures[str(pos_class) +", "at different threshold values as the auc of the 
pr-curve", "seaborn as sns import matplotlib.pyplot as pyplot from functools import", "= pandas.DataFrame(cm, index=classes, columns=classes) cms.append(cm_df) def prettify(n): \"\"\" if n", "average='micro'), \"recall_macro\": sklearn.metrics.recall_score(labels, predictions, average='macro'), \"recall_weighted\": sklearn.metrics.recall_score(labels, predictions, average='weighted'), \"f1_score_micro\":", "sklearn.metrics.recall_score(labels, predictions, average='weighted'), \"f1_score_micro\": sklearn.metrics.f1_score(labels, predictions, average='micro'), \"f1_score_macro\": sklearn.metrics.f1_score(labels, predictions,", "can not be calculated ...\") save_confusion_matrix(labels, predictions) report = save_classification_report(labels,", "pandas.DataFrame(cm, index=classes, columns=classes) cms.append(cm_df) def prettify(n): \"\"\" if n >", "predictions[i] == pos_class: tp += 1 if predictions[i] == pos_class", "predictions[i] == neg_class: tn += 1 if predictions[i] == neg_class", "in range(len(predictions)): if labels[i] == predictions[i] == pos_class: tp +=", "g.set(ylabel='Actual', xlabel='Prediction') for _, spine in g.spines.items(): spine.set_visible(True) pyplot.xticks(rotation=45) fig.tight_layout()", "fn == 0: pass else: # False negative rate measures[str(pos_class)", "calculate_cm_states(labels, predictions, pos_class, neg_class) measures[str(pos_class) + \"_tp\"] = tp measures[str(pos_class)", "and not the auc-pr-curve with the trapezoidal rule / linear", "def calculate_cm_states(labels, predictions, pos_class, neg_class): tp = 0 fp =", "+ \"_fp\"] = fp measures[str(pos_class) + \"_tn\"] = tn measures[str(pos_class)", "sklearn.metrics.recall_score(labels, predictions, average='micro'), \"recall_macro\": sklearn.metrics.recall_score(labels, predictions, average='macro'), \"recall_weighted\": sklearn.metrics.recall_score(labels, predictions,", "fn) if tp + fp == 0: pass else: #", "average='macro'), \"precision_weighted\": sklearn.metrics.precision_score(labels, predictions, average='weighted'), \"recall_micro\": sklearn.metrics.recall_score(labels, predictions, average='micro'), \"recall_macro\":", "the average precision at different threshold values as the auc", "\"_support\"] = report[str(pos_class)]['support'] if pos_class == 1: neg_class = 0", "else: # Specificity or true negative rate measures[str(pos_class) + \"_tnr\"]", "sklearn.metrics.average_precision_score(label, predict, average=average) def label_binarizer(labels): for index in range(0, len(labels)):", "report = save_classification_report(labels, predictions) classes = list(sorted(set(labels))) for pos_class in", "sklearn.metrics.accuracy_score(labels, predictions), \"balanced_accuracy\": sklearn.metrics.balanced_accuracy_score(labels, predictions), \"precision_micro\": sklearn.metrics.precision_score(labels, predictions, average='micro'), \"precision_macro\":", "fp = 0 tn = 0 fn = 0 for", "= label_binarizer.transform(label) predict = label_binarizer.transform(predict) return sklearn.metrics.average_precision_score(label, predict, average=average) def", "score can not be calculated ...\") save_confusion_matrix(labels, predictions) report =", "multi_class_prc_auc_score(label, predict, average): label_binarizer = sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label) label = label_binarizer.transform(label)", "\"_fn\"] = fn if tn + fp == 0: pass", "measures[\"roc_auc_macro\"] = multi_class_roc_auc_score(labels, predictions, 'macro') measures[\"roc_auc_micro\"] = multi_class_roc_auc_score(labels, 
predictions, 'micro')", "note we use the average precision at different threshold values", "+ fn) if tp + fn == 0: pass else:", "return sklearn.metrics.roc_auc_score(label, predict, average=average) def multi_class_prc_auc_score(label, predict, average): label_binarizer =", "or false positive rate measures[str(pos_class) + \"_fpr\"] = fp /", "reduce(lambda x, y: x.add(y, fill_value=0), cms) annot = cm.applymap(prettify) cm", "cm.sum(axis=1)).T fig, g = pyplot.subplots(figsize=(7, 4.5)) g = sns.heatmap(cm, annot=annot,", "\"accuracy\": sklearn.metrics.accuracy_score(labels, predictions), \"balanced_accuracy\": sklearn.metrics.balanced_accuracy_score(labels, predictions), \"precision_micro\": sklearn.metrics.precision_score(labels, predictions, average='micro'),", "pos_class == 1: neg_class = 0 else: neg_class = 1", "= { \"accuracy\": sklearn.metrics.accuracy_score(labels, predictions), \"balanced_accuracy\": sklearn.metrics.balanced_accuracy_score(labels, predictions), \"precision_micro\": sklearn.metrics.precision_score(labels,", "predictions, average='micro'), \"precision_macro\": sklearn.metrics.precision_score(labels, predictions, average='macro'), \"precision_weighted\": sklearn.metrics.precision_score(labels, predictions, average='weighted'),", "+ \"_fnr\"] = fn / (tp + fn) if tp", "import reduce # import numpy as np def metrics_from_prediction_and_label(labels, predictions,", "functools import reduce # import numpy as np def metrics_from_prediction_and_label(labels,", "measures[\"auc_prc_weighted\"] = multi_class_prc_auc_score(labels, predictions, 'weighted') measures[\"auc_prc_macro\"] = multi_class_prc_auc_score(labels, predictions, 'macro')", "average=average) def label_binarizer(labels): for index in range(0, len(labels)): if labels[index]", "labels[i] == predictions[i] == pos_class: tp += 1 if predictions[i]", "predictions, 'micro') except ValueError: print(\"Warning: Roc auc score can not", "measures[str(pos_class) + \"_fp\"] = fp measures[str(pos_class) + \"_tn\"] = tn", "measures[str(pos_class) + \"_fpr\"] = fp / (fp + tn) if", "cm = sklearn.metrics.confusion_matrix(labels, predictions) cm_df = pandas.DataFrame(cm, index=classes, columns=classes) cms.append(cm_df)", "rasterized=True, linewidths=0.1) _ = g.set(ylabel='Actual', xlabel='Prediction') for _, spine in", "import matplotlib.pyplot as pyplot from functools import reduce # import", "cbar=False, rasterized=True, linewidths=0.1) _ = g.set(ylabel='Actual', xlabel='Prediction') for _, spine", "+ fn) if tp + fp == 0: pass else:", "in classes: measures[str(pos_class) + \"_precision\"] = report[str(pos_class)]['precision'] measures[str(pos_class) + \"_recall\"]", "+ 'M' elif n > 1000: return str(np.round(n / 1000,", "+= 1 return tp, fp, tn, fn def save_classification_report(labels, predictions):", "prettify(n): \"\"\" if n > 1000000: return str(np.round(n / 1000000,", "if labels[i] == predictions[i] == pos_class: tp += 1 if", "false positive rate measures[str(pos_class) + \"_fpr\"] = fp / (fp", "cms.append(cm_df) def prettify(n): \"\"\" if n > 1000000: return str(np.round(n", "average): label_binarizer = sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label) label = label_binarizer.transform(label) predict =", "+= 1 if labels[i] == predictions[i] == neg_class: tn +=", "else: # False negative rate measures[str(pos_class) + \"_fnr\"] = fn", "predictions) cm_df = pandas.DataFrame(cm, index=classes, columns=classes) cms.append(cm_df) def prettify(n): \"\"\"", "str(n) \"\"\" return str(n) cm = reduce(lambda x, y: 
x.add(y,", "sklearn.metrics.f1_score(labels, predictions, average='macro'), \"f1_score_weighted\": sklearn.metrics.f1_score(labels, predictions, average='weighted') } try: measures[\"roc_auc_weighted\"]", "range(0, len(labels)): if labels[index] >= 0.5: labels[index] = 1.0 else:", "/ 1000, 1)) + 'K' else: return str(n) \"\"\" return", "0.5: labels[index] = 1.0 else: labels[index] = 0.0 return labels", "cms = [] cm = sklearn.metrics.confusion_matrix(labels, predictions) cm_df = pandas.DataFrame(cm,", "not be calculated ...\") save_confusion_matrix(labels, predictions) report = save_classification_report(labels, predictions)", "+ \"_precision\"] = report[str(pos_class)]['precision'] measures[str(pos_class) + \"_recall\"] = report[str(pos_class)]['recall'] measures[str(pos_class)", "x, y: x.add(y, fill_value=0), cms) annot = cm.applymap(prettify) cm =", "\"balanced_accuracy\": sklearn.metrics.balanced_accuracy_score(labels, predictions), \"precision_micro\": sklearn.metrics.precision_score(labels, predictions, average='micro'), \"precision_macro\": sklearn.metrics.precision_score(labels, predictions,", "predictions, average='macro'), \"f1_score_weighted\": sklearn.metrics.f1_score(labels, predictions, average='weighted') } try: measures[\"roc_auc_weighted\"] =", "= sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label) label = label_binarizer.transform(label) predict = label_binarizer.transform(predict) return", "= [] cm = sklearn.metrics.confusion_matrix(labels, predictions) cm_df = pandas.DataFrame(cm, index=classes,", "= label_binarizer.transform(predict) return sklearn.metrics.average_precision_score(label, predict, average=average) def label_binarizer(labels): for index", "0 fp = 0 tn = 0 fn = 0", "else: neg_class = 1 tp, fp, tn, fn = calculate_cm_states(labels,", "= multi_class_roc_auc_score(labels, predictions, 'macro') measures[\"roc_auc_micro\"] = multi_class_roc_auc_score(labels, predictions, 'micro') except", "False negative rate measures[str(pos_class) + \"_fnr\"] = fn / (tp", "def metrics_from_prediction_and_label(labels, predictions, verbose=False): measures = { \"accuracy\": sklearn.metrics.accuracy_score(labels, predictions),", "y: x.add(y, fill_value=0), cms) annot = cm.applymap(prettify) cm = (cm.T", "def multi_class_roc_auc_score(label, predict, average): label_binarizer = sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label) label =", "/ cm.sum(axis=1)).T fig, g = pyplot.subplots(figsize=(7, 4.5)) g = sns.heatmap(cm,", "fp) return measures def calculate_cm_states(labels, predictions, pos_class, neg_class): tp =", "1000: return str(np.round(n / 1000, 1)) + 'K' else: return", "\"_tn\"] = tn measures[str(pos_class) + \"_fn\"] = fn if tn", "else: # False discovery rate measures[str(pos_class) + \"_fdr\"] = fp", "cm = reduce(lambda x, y: x.add(y, fill_value=0), cms) annot =", "(tp + fn) if tp + fp == 0: pass", "linear interpolation because it could be too optimistic measures[\"auc_prc_weighted\"] =", "the pr-curve # and not the auc-pr-curve with the trapezoidal", "except ValueError: print(\"Warning: Roc auc score can not be calculated", "+ fp == 0: pass else: # False discovery rate", "else: return str(n) \"\"\" return str(n) cm = reduce(lambda x,", "labels[i] == predictions[i] == neg_class: tn += 1 if predictions[i]", "calculate_cm_states(labels, predictions, pos_class, neg_class): tp = 0 fp = 0", "predictions[i] == neg_class and labels[i] != predictions[i]: fn += 1", "be calculated ...\") save_confusion_matrix(labels, predictions) report = 
save_classification_report(labels, predictions) classes", "\"_fp\"] = fp measures[str(pos_class) + \"_tn\"] = tn measures[str(pos_class) +", "multi_class_prc_auc_score(labels, predictions, 'micro') except ValueError: print(\"Warning: Auc prc score can", "+= 1 if predictions[i] == pos_class and labels[i] != predictions[i]:", "return str(n) cm = reduce(lambda x, y: x.add(y, fill_value=0), cms)", "== neg_class: tn += 1 if predictions[i] == neg_class and", "values as the auc of the pr-curve # and not", "average='micro'), \"precision_macro\": sklearn.metrics.precision_score(labels, predictions, average='macro'), \"precision_weighted\": sklearn.metrics.precision_score(labels, predictions, average='weighted'), \"recall_micro\":", "+ \"_tnr\"] = tn / (tn + fp) # Fall", "= fn / (tp + fn) if tp + fp", "== 0: pass else: # False discovery rate measures[str(pos_class) +", "too optimistic measures[\"auc_prc_weighted\"] = multi_class_prc_auc_score(labels, predictions, 'weighted') measures[\"auc_prc_macro\"] = multi_class_prc_auc_score(labels,", "== 0: pass else: # Specificity or true negative rate", "/ (tn + fp) # Fall out or false positive", "save_classification_report(labels, predictions): return sklearn.metrics.classification_report(y_true=labels, y_pred=predictions, output_dict=True) def multi_class_roc_auc_score(label, predict, average):", "we use the average precision at different threshold values as", "\"_fdr\"] = fp / (tp + fp) return measures def", "str(np.round(n / 1000, 1)) + 'K' else: return str(n) \"\"\"", "0: pass else: # Specificity or true negative rate measures[str(pos_class)", "if labels[i] == predictions[i] == neg_class: tn += 1 if", "cm_df = pandas.DataFrame(cm, index=classes, columns=classes) cms.append(cm_df) def prettify(n): \"\"\" if", "= sklearn.utils.multiclass.unique_labels(labels, predictions) cms = [] cm = sklearn.metrics.confusion_matrix(labels, predictions)", "pyplot.subplots(figsize=(7, 4.5)) g = sns.heatmap(cm, annot=annot, fmt='', cmap='Blues', cbar=False, rasterized=True,", "fp) # Fall out or false positive rate measures[str(pos_class) +", "predictions): return sklearn.metrics.classification_report(y_true=labels, y_pred=predictions, output_dict=True) def multi_class_roc_auc_score(label, predict, average): label_binarizer", "elif n > 1000: return str(np.round(n / 1000, 1)) +", "tn) if tn + fn == 0: pass else: #", "predictions, average='weighted') } try: measures[\"roc_auc_weighted\"] = multi_class_roc_auc_score(labels, predictions, 'weighted') measures[\"roc_auc_macro\"]", "sklearn.metrics.precision_score(labels, predictions, average='weighted'), \"recall_micro\": sklearn.metrics.recall_score(labels, predictions, average='micro'), \"recall_macro\": sklearn.metrics.recall_score(labels, predictions,", "tp, fp, tn, fn = calculate_cm_states(labels, predictions, pos_class, neg_class) measures[str(pos_class)", "fill_value=0), cms) annot = cm.applymap(prettify) cm = (cm.T / cm.sum(axis=1)).T", "as the auc of the pr-curve # and not the", "fn) if tp + fn == 0: pass else: #", "\"recall_macro\": sklearn.metrics.recall_score(labels, predictions, average='macro'), \"recall_weighted\": sklearn.metrics.recall_score(labels, predictions, average='weighted'), \"f1_score_micro\": sklearn.metrics.f1_score(labels,", "average='micro'), \"f1_score_macro\": sklearn.metrics.f1_score(labels, predictions, average='macro'), \"f1_score_weighted\": sklearn.metrics.f1_score(labels, predictions, average='weighted') }", "predictions), \"balanced_accuracy\": 
sklearn.metrics.balanced_accuracy_score(labels, predictions), \"precision_micro\": sklearn.metrics.precision_score(labels, predictions, average='micro'), \"precision_macro\": sklearn.metrics.precision_score(labels,", "if tn + fp == 0: pass else: # Specificity", "'micro') except ValueError: print(\"Warning: Roc auc score can not be", "labels[index] = 0.0 return labels def save_confusion_matrix(labels, predictions, path=\"../../../results/cm.pdf\"): classes", "tp measures[str(pos_class) + \"_fp\"] = fp measures[str(pos_class) + \"_tn\"] =", "1: neg_class = 0 else: neg_class = 1 tp, fp,", "tn, fn = calculate_cm_states(labels, predictions, pos_class, neg_class) measures[str(pos_class) + \"_tp\"]", "auc of the pr-curve # and not the auc-pr-curve with", "measures[str(pos_class) + \"_support\"] = report[str(pos_class)]['support'] if pos_class == 1: neg_class", "\"_fpr\"] = fp / (fp + tn) if tn +", "= tp measures[str(pos_class) + \"_fp\"] = fp measures[str(pos_class) + \"_tn\"]", "def multi_class_prc_auc_score(label, predict, average): label_binarizer = sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label) label =", "predict = label_binarizer.transform(predict) return sklearn.metrics.average_precision_score(label, predict, average=average) def label_binarizer(labels): for", "classes = sklearn.utils.multiclass.unique_labels(labels, predictions) cms = [] cm = sklearn.metrics.confusion_matrix(labels,", "else: labels[index] = 0.0 return labels def save_confusion_matrix(labels, predictions, path=\"../../../results/cm.pdf\"):", "/ 1000000, 1)) + 'M' elif n > 1000: return", "sklearn.metrics.f1_score(labels, predictions, average='micro'), \"f1_score_macro\": sklearn.metrics.f1_score(labels, predictions, average='macro'), \"f1_score_weighted\": sklearn.metrics.f1_score(labels, predictions,", "n > 1000: return str(np.round(n / 1000, 1)) + 'K'", "classes: measures[str(pos_class) + \"_precision\"] = report[str(pos_class)]['precision'] measures[str(pos_class) + \"_recall\"] =", "for index in range(0, len(labels)): if labels[index] >= 0.5: labels[index]", "pass else: # False discovery rate measures[str(pos_class) + \"_fdr\"] =", "1000000, 1)) + 'M' elif n > 1000: return str(np.round(n", "predictions) classes = list(sorted(set(labels))) for pos_class in classes: measures[str(pos_class) +", "(tp + fp) return measures def calculate_cm_states(labels, predictions, pos_class, neg_class):", "fmt='', cmap='Blues', cbar=False, rasterized=True, linewidths=0.1) _ = g.set(ylabel='Actual', xlabel='Prediction') for", "predict, average=average) def label_binarizer(labels): for index in range(0, len(labels)): if", "predictions, average='macro'), \"precision_weighted\": sklearn.metrics.precision_score(labels, predictions, average='weighted'), \"recall_micro\": sklearn.metrics.recall_score(labels, predictions, average='micro'),", "= calculate_cm_states(labels, predictions, pos_class, neg_class) measures[str(pos_class) + \"_tp\"] = tp", "labels[i] != predictions[i]: fn += 1 return tp, fp, tn,", "0 tn = 0 fn = 0 for i in", "fig, g = pyplot.subplots(figsize=(7, 4.5)) g = sns.heatmap(cm, annot=annot, fmt='',", "and labels[i] != predictions[i]: fn += 1 return tp, fp,", "rate measures[str(pos_class) + \"_fdr\"] = fp / (tp + fp)", "range(len(predictions)): if labels[i] == predictions[i] == pos_class: tp += 1", "= tn / (tn + fn) if tp + fn", "\"precision_weighted\": sklearn.metrics.precision_score(labels, predictions, average='weighted'), \"recall_micro\": sklearn.metrics.recall_score(labels, predictions, average='micro'), 
\"recall_macro\": sklearn.metrics.recall_score(labels,", "ValueError: print(\"Warning: Auc prc score can not be calculated ...\")", "prc score can not be calculated ...\") save_confusion_matrix(labels, predictions) report", "\"recall_weighted\": sklearn.metrics.recall_score(labels, predictions, average='weighted'), \"f1_score_micro\": sklearn.metrics.f1_score(labels, predictions, average='micro'), \"f1_score_macro\": sklearn.metrics.f1_score(labels,", "try: # note we use the average precision at different", "= list(sorted(set(labels))) for pos_class in classes: measures[str(pos_class) + \"_precision\"] =", "measures[str(pos_class) + \"_recall\"] = report[str(pos_class)]['recall'] measures[str(pos_class) + \"_f1-score\"] = report[str(pos_class)]['f1-score']", "= fp measures[str(pos_class) + \"_tn\"] = tn measures[str(pos_class) + \"_fn\"]", "tp + fn == 0: pass else: # False negative", "measures[\"roc_auc_micro\"] = multi_class_roc_auc_score(labels, predictions, 'micro') except ValueError: print(\"Warning: Roc auc", "def save_confusion_matrix(labels, predictions, path=\"../../../results/cm.pdf\"): classes = sklearn.utils.multiclass.unique_labels(labels, predictions) cms =", "because it could be too optimistic measures[\"auc_prc_weighted\"] = multi_class_prc_auc_score(labels, predictions,", "fp == 0: pass else: # False discovery rate measures[str(pos_class)", "fn += 1 return tp, fp, tn, fn def save_classification_report(labels,", "multi_class_prc_auc_score(labels, predictions, 'macro') measures[\"auc_prc_micro\"] = multi_class_prc_auc_score(labels, predictions, 'micro') except ValueError:", "i in range(len(predictions)): if labels[i] == predictions[i] == pos_class: tp", "= save_classification_report(labels, predictions) classes = list(sorted(set(labels))) for pos_class in classes:", "pass else: # False negative rate measures[str(pos_class) + \"_fnr\"] =", "multi_class_roc_auc_score(label, predict, average): label_binarizer = sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label) label = label_binarizer.transform(label)", "else: # Negative predictive value measures[str(pos_class) + \"_npv\"] = tn", "n > 1000000: return str(np.round(n / 1000000, 1)) + 'M'", "tn = 0 fn = 0 for i in range(len(predictions)):", "= 0 else: neg_class = 1 tp, fp, tn, fn", "average='macro'), \"recall_weighted\": sklearn.metrics.recall_score(labels, predictions, average='weighted'), \"f1_score_micro\": sklearn.metrics.f1_score(labels, predictions, average='micro'), \"f1_score_macro\":", "rate measures[str(pos_class) + \"_fnr\"] = fn / (tp + fn)", "predictions, 'micro') except ValueError: print(\"Warning: Auc prc score can not", "as sns import matplotlib.pyplot as pyplot from functools import reduce", "= g.set(ylabel='Actual', xlabel='Prediction') for _, spine in g.spines.items(): spine.set_visible(True) pyplot.xticks(rotation=45)", "= 1 tp, fp, tn, fn = calculate_cm_states(labels, predictions, pos_class,", "(tn + fn) if tp + fn == 0: pass", "cm = (cm.T / cm.sum(axis=1)).T fig, g = pyplot.subplots(figsize=(7, 4.5))", "sklearn.metrics.roc_auc_score(label, predict, average=average) def multi_class_prc_auc_score(label, predict, average): label_binarizer = sklearn.preprocessing.LabelBinarizer()", "sklearn.utils.multiclass.unique_labels(labels, predictions) cms = [] cm = sklearn.metrics.confusion_matrix(labels, predictions) cm_df", "measures[str(pos_class) + \"_tp\"] = tp measures[str(pos_class) + \"_fp\"] = fp", "predict, average): label_binarizer = sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label) 
label = label_binarizer.transform(label) predict", "Auc prc score can not be calculated ...\") save_confusion_matrix(labels, predictions)", "cms) annot = cm.applymap(prettify) cm = (cm.T / cm.sum(axis=1)).T fig,", "pr-curve # and not the auc-pr-curve with the trapezoidal rule", "= tn / (tn + fp) # Fall out or", "= sklearn.metrics.confusion_matrix(labels, predictions) cm_df = pandas.DataFrame(cm, index=classes, columns=classes) cms.append(cm_df) def", "predictions, pos_class, neg_class): tp = 0 fp = 0 tn", "predictions, 'weighted') measures[\"roc_auc_macro\"] = multi_class_roc_auc_score(labels, predictions, 'macro') measures[\"roc_auc_micro\"] = multi_class_roc_auc_score(labels,", "in range(0, len(labels)): if labels[index] >= 0.5: labels[index] = 1.0", "x.add(y, fill_value=0), cms) annot = cm.applymap(prettify) cm = (cm.T /", "be calculated ...\") try: # note we use the average", "linewidths=0.1) _ = g.set(ylabel='Actual', xlabel='Prediction') for _, spine in g.spines.items():", "pandas import seaborn as sns import matplotlib.pyplot as pyplot from", "verbose=False): measures = { \"accuracy\": sklearn.metrics.accuracy_score(labels, predictions), \"balanced_accuracy\": sklearn.metrics.balanced_accuracy_score(labels, predictions),", "measures[\"roc_auc_weighted\"] = multi_class_roc_auc_score(labels, predictions, 'weighted') measures[\"roc_auc_macro\"] = multi_class_roc_auc_score(labels, predictions, 'macro')", "Specificity or true negative rate measures[str(pos_class) + \"_tnr\"] = tn", "pos_class in classes: measures[str(pos_class) + \"_precision\"] = report[str(pos_class)]['precision'] measures[str(pos_class) +", "if predictions[i] == neg_class and labels[i] != predictions[i]: fn +=", "= multi_class_prc_auc_score(labels, predictions, 'micro') except ValueError: print(\"Warning: Auc prc score", "predictions) cms = [] cm = sklearn.metrics.confusion_matrix(labels, predictions) cm_df =", "/ (tp + fp) return measures def calculate_cm_states(labels, predictions, pos_class,", "predictions, 'macro') measures[\"roc_auc_micro\"] = multi_class_roc_auc_score(labels, predictions, 'micro') except ValueError: print(\"Warning:", "\"_precision\"] = report[str(pos_class)]['precision'] measures[str(pos_class) + \"_recall\"] = report[str(pos_class)]['recall'] measures[str(pos_class) +", "for _, spine in g.spines.items(): spine.set_visible(True) pyplot.xticks(rotation=45) fig.tight_layout() fig.savefig(path) pyplot.close()", "sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label) label = label_binarizer.transform(label) predict = label_binarizer.transform(predict) return sklearn.metrics.average_precision_score(label,", "predictions, average='weighted'), \"f1_score_micro\": sklearn.metrics.f1_score(labels, predictions, average='micro'), \"f1_score_macro\": sklearn.metrics.f1_score(labels, predictions, average='macro'),", "== pos_class and labels[i] != predictions[i]: fp += 1 if", "1)) + 'K' else: return str(n) \"\"\" return str(n) cm", "predictions, 'weighted') measures[\"auc_prc_macro\"] = multi_class_prc_auc_score(labels, predictions, 'macro') measures[\"auc_prc_micro\"] = multi_class_prc_auc_score(labels,", "= (cm.T / cm.sum(axis=1)).T fig, g = pyplot.subplots(figsize=(7, 4.5)) g", "import numpy as np def metrics_from_prediction_and_label(labels, predictions, verbose=False): measures =", "predictions, average='micro'), \"f1_score_macro\": sklearn.metrics.f1_score(labels, predictions, average='macro'), \"f1_score_weighted\": sklearn.metrics.f1_score(labels, predictions, average='weighted')", 
"average='macro'), \"f1_score_weighted\": sklearn.metrics.f1_score(labels, predictions, average='weighted') } try: measures[\"roc_auc_weighted\"] = multi_class_roc_auc_score(labels,", "'macro') measures[\"roc_auc_micro\"] = multi_class_roc_auc_score(labels, predictions, 'micro') except ValueError: print(\"Warning: Roc", "neg_class and labels[i] != predictions[i]: fn += 1 return tp,", "save_confusion_matrix(labels, predictions) report = save_classification_report(labels, predictions) classes = list(sorted(set(labels))) for", ">= 0.5: labels[index] = 1.0 else: labels[index] = 0.0 return", "1000000: return str(np.round(n / 1000000, 1)) + 'M' elif n", "columns=classes) cms.append(cm_df) def prettify(n): \"\"\" if n > 1000000: return", "+ \"_fdr\"] = fp / (tp + fp) return measures", "predictions, 'macro') measures[\"auc_prc_micro\"] = multi_class_prc_auc_score(labels, predictions, 'micro') except ValueError: print(\"Warning:", "neg_class = 1 tp, fp, tn, fn = calculate_cm_states(labels, predictions,", "Fall out or false positive rate measures[str(pos_class) + \"_fpr\"] =", "= multi_class_prc_auc_score(labels, predictions, 'macro') measures[\"auc_prc_micro\"] = multi_class_prc_auc_score(labels, predictions, 'micro') except", "= 0 for i in range(len(predictions)): if labels[i] == predictions[i]", "or true negative rate measures[str(pos_class) + \"_tnr\"] = tn /", "# False negative rate measures[str(pos_class) + \"_fnr\"] = fn /", "labels[index] = 1.0 else: labels[index] = 0.0 return labels def", "save_classification_report(labels, predictions) classes = list(sorted(set(labels))) for pos_class in classes: measures[str(pos_class)", "\"\"\" return str(n) cm = reduce(lambda x, y: x.add(y, fill_value=0),", "'weighted') measures[\"roc_auc_macro\"] = multi_class_roc_auc_score(labels, predictions, 'macro') measures[\"roc_auc_micro\"] = multi_class_roc_auc_score(labels, predictions,", "== predictions[i] == pos_class: tp += 1 if predictions[i] ==", "fn / (tp + fn) if tp + fp ==", "the auc of the pr-curve # and not the auc-pr-curve", "label_binarizer.transform(predict) return sklearn.metrics.roc_auc_score(label, predict, average=average) def multi_class_prc_auc_score(label, predict, average): label_binarizer", "tn measures[str(pos_class) + \"_fn\"] = fn if tn + fp", "fp, tn, fn = calculate_cm_states(labels, predictions, pos_class, neg_class) measures[str(pos_class) +", "+ fp == 0: pass else: # Specificity or true", "auc score can not be calculated ...\") try: # note", "tn + fp == 0: pass else: # Specificity or", "\"_tp\"] = tp measures[str(pos_class) + \"_fp\"] = fp measures[str(pos_class) +", "return labels def save_confusion_matrix(labels, predictions, path=\"../../../results/cm.pdf\"): classes = sklearn.utils.multiclass.unique_labels(labels, predictions)", "value measures[str(pos_class) + \"_npv\"] = tn / (tn + fn)" ]
[ "= models.TextField() class Meta: app_label = get_tenant_model()._meta.app_label return TenantModel @tag(\"bug\")", "# The goal is that the next line doesn't raise", "should get a ProgrammingError with self.assertRaises(ProgrammingError): management.call_command(\"migrate\", all_schemas=True, verbosity=0) #", "setUpClass(cls): tenant1 = TenantModel(schema_name=\"tenant1\") tenant1.save(verbosity=0) @classmethod def tearDownClass(cls): for tenant", "warnings.simplefilter(\"ignore\") # Avoid warnings about model being registered twice with", "except for tenant1, THIS IS THE CASE WE WANT TO", "verbosity=0) @tag(\"bug\") class UnappliedMigrationTestCase(TransactionTestCase): \"\"\" Provoke a handled ProgrammingError by", "THE CASE WE WANT TO TEST # This should work", "TenantModel @tag(\"bug\") class MigrationZeroRoundTripTestCase(TransactionTestCase): \"\"\" Provoke a handled ProgrammingError by", "The goal is that the next line doesn't raise ProgrammingError", "test_database_checks_with_zero_migrations(self): management.call_command(\"migrate\", \"shared_public\", \"zero\", verbosity=0) # The goal is that", "\"Error while attempting to retrieve dynamic schemas. \" \"Perhaps you", "dynamic schemas. \" \"Perhaps you need to migrate the 'public'", "MigrationZeroRoundTripTestCase(TransactionTestCase): \"\"\" Provoke a handled ProgrammingError by migrating models from", "next line doesn't raise ProgrammingError check_schema_names(apps.get_app_config(\"django_pgschemas\")) management.call_command(\"migrate\", verbosity=0) @tag(\"bug\") class", "Meta: app_label = get_tenant_model()._meta.app_label return TenantModel @tag(\"bug\") class MigrationZeroRoundTripTestCase(TransactionTestCase): \"\"\"", "in TenantModel.objects.all(): tenant.delete(force_drop=True) def test_migrate_with_exclusions(self): # We first unapply a", "get a ProgrammingError with self.assertRaises(ProgrammingError): management.call_command(\"migrate\", all_schemas=True, verbosity=0) # We", "then migrate on all schemas except for tenant1, THIS IS", "TenantModel = get_tenant_model() def patched_get_tenant_model(*args, **kwargs): class TenantModel(TenantMixin): dummy =", "Provoke a handled ProgrammingError by migrating models from empty database.", "'public' schema first?\", ) @tag(\"bug\") class MigrateIgnoringExcludedSchemasTestCase(TransactionTestCase): @classmethod def setUpClass(cls):", "schemas=[\"tenant1\"], verbosity=0) # We then migrate on all schemas except", "We then migrate on all schemas except for tenant1, THIS", "schemas except for tenant1, THIS IS THE CASE WE WANT", "we should get a ProgrammingError with self.assertRaises(ProgrammingError): management.call_command(\"migrate\", all_schemas=True, verbosity=0)", "from django.test import TransactionTestCase, tag from django_pgschemas.checks import check_schema_names from", "@classmethod def tearDownClass(cls): for tenant in TenantModel.objects.all(): tenant.delete(force_drop=True) def test_migrate_with_exclusions(self):", "django.test import TransactionTestCase, tag from django_pgschemas.checks import check_schema_names from django_pgschemas.models", "get_tenant_model() def patched_get_tenant_model(*args, **kwargs): class TenantModel(TenantMixin): dummy = models.TextField() class", "TenantModel(schema_name=\"tenant1\") tenant1.save(verbosity=0) @classmethod def tearDownClass(cls): for tenant in TenantModel.objects.all(): tenant.delete(force_drop=True)", "without errors management.call_command(\"migrate\", all_schemas=True, excluded_schemas=[\"tenant1\"], verbosity=0) # 
If we try", "django.apps import apps from django.core import management from django.core.management.base import", "django.core import management from django.core.management.base import CommandError from django.db import", "model changes. \"\"\" @classmethod def setUpClass(cls): tenant1 = TenantModel(schema_name=\"tenant1\") tenant1.save(verbosity=0)", "from django_pgschemas.utils import get_tenant_model TenantModel = get_tenant_model() def patched_get_tenant_model(*args, **kwargs):", "retrieve dynamic schemas. \" \"Perhaps you need to migrate the", "with fake so we can reapply it without fake #", "you need to migrate the 'public' schema first?\", ) @tag(\"bug\")", "all_schemas=True, excluded_schemas=[\"tenant1\"], verbosity=0) # If we try to global migrate", "get_tenant_model TenantModel = get_tenant_model() def patched_get_tenant_model(*args, **kwargs): class TenantModel(TenantMixin): dummy", "a ProgrammingError with self.assertRaises(ProgrammingError): management.call_command(\"migrate\", all_schemas=True, verbosity=0) # We finally", "with self.assertRaises(ProgrammingError): management.call_command(\"migrate\", all_schemas=True, verbosity=0) # We finally apply the", "IS THE CASE WE WANT TO TEST # This should", "tenant.delete(force_drop=True) @patch(\"django_pgschemas.management.commands.get_tenant_model\", patched_get_tenant_model) def test_whowill_with_pending_migrations(self): with warnings.catch_warnings(): warnings.simplefilter(\"ignore\") # Avoid", "in TenantModel.objects.all(): tenant.delete(force_drop=True) @patch(\"django_pgschemas.management.commands.get_tenant_model\", patched_get_tenant_model) def test_whowill_with_pending_migrations(self): with warnings.catch_warnings(): warnings.simplefilter(\"ignore\")", "command with pending model changes. \"\"\" @classmethod def setUpClass(cls): tenant1", "ProgrammingError from django.test import TransactionTestCase, tag from django_pgschemas.checks import check_schema_names", "need to migrate the 'public' schema first?\", ) @tag(\"bug\") class", "excluded_schemas=[\"tenant1\"], verbosity=0) # If we try to global migrate now,", "import patch from django.apps import apps from django.core import management", "try to global migrate now, we should get a ProgrammingError", "import check_schema_names from django_pgschemas.models import TenantMixin from django_pgschemas.utils import get_tenant_model", "# This should work without errors management.call_command(\"migrate\", fake=True, all_schemas=True, verbosity=0)", "Avoid warnings about model being registered twice with self.assertRaises(CommandError) as", "attempting to retrieve dynamic schemas. \" \"Perhaps you need to", "class Meta: app_label = get_tenant_model()._meta.app_label return TenantModel @tag(\"bug\") class MigrationZeroRoundTripTestCase(TransactionTestCase):", "import management from django.core.management.base import CommandError from django.db import models", "migration again with fake # This should work without errors", "= get_tenant_model()._meta.app_label return TenantModel @tag(\"bug\") class MigrationZeroRoundTripTestCase(TransactionTestCase): \"\"\" Provoke a", "test_whowill_with_pending_migrations(self): with warnings.catch_warnings(): warnings.simplefilter(\"ignore\") # Avoid warnings about model being", "on all schemas except for tenant1, THIS IS THE CASE", "models from empty database. 
\"\"\" def test_database_checks_with_zero_migrations(self): management.call_command(\"migrate\", \"shared_public\", \"zero\",", "\"Perhaps you need to migrate the 'public' schema first?\", )", "self.assertEqual( str(ctx.exception), \"Error while attempting to retrieve dynamic schemas. \"", ") @tag(\"bug\") class MigrateIgnoringExcludedSchemasTestCase(TransactionTestCase): @classmethod def setUpClass(cls): tenant1 = TenantModel(schema_name=\"tenant1\")", "now, we should get a ProgrammingError with self.assertRaises(ProgrammingError): management.call_command(\"migrate\", all_schemas=True,", "self.assertRaises(ProgrammingError): management.call_command(\"migrate\", all_schemas=True, verbosity=0) # We finally apply the migration", "app_label = get_tenant_model()._meta.app_label return TenantModel @tag(\"bug\") class MigrationZeroRoundTripTestCase(TransactionTestCase): \"\"\" Provoke", "fake # This should work without errors management.call_command(\"migrate\", \"app_tenants\", \"0001_initial\",", "This should work without errors management.call_command(\"migrate\", \"app_tenants\", \"0001_initial\", fake=True, schemas=[\"tenant1\"],", "fake so we can reapply it without fake # This", "pending model changes. \"\"\" @classmethod def setUpClass(cls): tenant1 = TenantModel(schema_name=\"tenant1\")", "models.TextField() class Meta: app_label = get_tenant_model()._meta.app_label return TenantModel @tag(\"bug\") class", "\"zero\", verbosity=0) # The goal is that the next line", "\"\"\" Provoke a handled ProgrammingError by migrating models from empty", "return TenantModel @tag(\"bug\") class MigrationZeroRoundTripTestCase(TransactionTestCase): \"\"\" Provoke a handled ProgrammingError", "migrate now, we should get a ProgrammingError with self.assertRaises(ProgrammingError): management.call_command(\"migrate\",", "\" \"Perhaps you need to migrate the 'public' schema first?\",", "@classmethod def tearDownClass(cls): for tenant in TenantModel.objects.all(): tenant.delete(force_drop=True) @patch(\"django_pgschemas.management.commands.get_tenant_model\", patched_get_tenant_model)", "work without errors management.call_command(\"migrate\", all_schemas=True, excluded_schemas=[\"tenant1\"], verbosity=0) # If we", "verbosity=0) # We then migrate on all schemas except for", "fake # This should work without errors management.call_command(\"migrate\", fake=True, all_schemas=True,", "TenantModel(TenantMixin): dummy = models.TextField() class Meta: app_label = get_tenant_model()._meta.app_label return", "fake=True, schemas=[\"tenant1\"], verbosity=0) # We then migrate on all schemas", "migrate on all schemas except for tenant1, THIS IS THE", "# If we try to global migrate now, we should", "it without fake # This should work without errors management.call_command(\"migrate\",", "tenant.delete(force_drop=True) def test_migrate_with_exclusions(self): # We first unapply a migration with", "tearDownClass(cls): for tenant in TenantModel.objects.all(): tenant.delete(force_drop=True) @patch(\"django_pgschemas.management.commands.get_tenant_model\", patched_get_tenant_model) def test_whowill_with_pending_migrations(self):", "str(ctx.exception), \"Error while attempting to retrieve dynamic schemas. 
\" \"Perhaps", "to migrate the 'public' schema first?\", ) @tag(\"bug\") class MigrateIgnoringExcludedSchemasTestCase(TransactionTestCase):", "management.call_command(\"migrate\", \"shared_public\", \"zero\", verbosity=0) # The goal is that the", "from django_pgschemas.models import TenantMixin from django_pgschemas.utils import get_tenant_model TenantModel =", "get_tenant_model()._meta.app_label return TenantModel @tag(\"bug\") class MigrationZeroRoundTripTestCase(TransactionTestCase): \"\"\" Provoke a handled", "to retrieve dynamic schemas. \" \"Perhaps you need to migrate", "If we try to global migrate now, we should get", "ProgrammingError by running tenant command with pending model changes. \"\"\"", "so we can reapply it without fake # This should", "We finally apply the migration again with fake # This", "doesn't raise ProgrammingError check_schema_names(apps.get_app_config(\"django_pgschemas\")) management.call_command(\"migrate\", verbosity=0) @tag(\"bug\") class UnappliedMigrationTestCase(TransactionTestCase): \"\"\"", "@tag(\"bug\") class MigrateIgnoringExcludedSchemasTestCase(TransactionTestCase): @classmethod def setUpClass(cls): tenant1 = TenantModel(schema_name=\"tenant1\") tenant1.save(verbosity=0)", "@classmethod def setUpClass(cls): tenant1 = TenantModel(schema_name=\"tenant1\") tenant1.save(verbosity=0) @classmethod def tearDownClass(cls):", "ctx: management.call_command(\"whowill\", all_schemas=True, verbosity=0) self.assertEqual( str(ctx.exception), \"Error while attempting to", "can reapply it without fake # This should work without", "without fake # This should work without errors management.call_command(\"migrate\", \"app_tenants\",", "all_schemas=True, verbosity=0) # We finally apply the migration again with", "import apps from django.core import management from django.core.management.base import CommandError", "django_pgschemas.checks import check_schema_names from django_pgschemas.models import TenantMixin from django_pgschemas.utils import", "management.call_command(\"migrate\", verbosity=0) @tag(\"bug\") class UnappliedMigrationTestCase(TransactionTestCase): \"\"\" Provoke a handled ProgrammingError", "should work without errors management.call_command(\"migrate\", \"app_tenants\", \"0001_initial\", fake=True, schemas=[\"tenant1\"], verbosity=0)", "being registered twice with self.assertRaises(CommandError) as ctx: management.call_command(\"whowill\", all_schemas=True, verbosity=0)", "schemas. \" \"Perhaps you need to migrate the 'public' schema", "for tenant in TenantModel.objects.all(): tenant.delete(force_drop=True) @patch(\"django_pgschemas.management.commands.get_tenant_model\", patched_get_tenant_model) def test_whowill_with_pending_migrations(self): with", "the next line doesn't raise ProgrammingError check_schema_names(apps.get_app_config(\"django_pgschemas\")) management.call_command(\"migrate\", verbosity=0) @tag(\"bug\")", "from django.db import models from django.db.utils import ProgrammingError from django.test", "as ctx: management.call_command(\"whowill\", all_schemas=True, verbosity=0) self.assertEqual( str(ctx.exception), \"Error while attempting", "verbosity=0) # The goal is that the next line doesn't", "database. \"\"\" def test_database_checks_with_zero_migrations(self): management.call_command(\"migrate\", \"shared_public\", \"zero\", verbosity=0) # The", "migrating models from empty database. \"\"\" def test_database_checks_with_zero_migrations(self): management.call_command(\"migrate\", \"shared_public\",", "with pending model changes. 
\"\"\" @classmethod def setUpClass(cls): tenant1 =", "work without errors management.call_command(\"migrate\", \"app_tenants\", \"0001_initial\", fake=True, schemas=[\"tenant1\"], verbosity=0) #", "def test_migrate_with_exclusions(self): # We first unapply a migration with fake", "management.call_command(\"migrate\", all_schemas=True, verbosity=0) # We finally apply the migration again", "warnings.catch_warnings(): warnings.simplefilter(\"ignore\") # Avoid warnings about model being registered twice", "= get_tenant_model() def patched_get_tenant_model(*args, **kwargs): class TenantModel(TenantMixin): dummy = models.TextField()", "patch from django.apps import apps from django.core import management from", "while attempting to retrieve dynamic schemas. \" \"Perhaps you need", "should work without errors management.call_command(\"migrate\", all_schemas=True, excluded_schemas=[\"tenant1\"], verbosity=0) # If", "verbosity=0) # If we try to global migrate now, we", "check_schema_names(apps.get_app_config(\"django_pgschemas\")) management.call_command(\"migrate\", verbosity=0) @tag(\"bug\") class UnappliedMigrationTestCase(TransactionTestCase): \"\"\" Provoke a handled", "tenant1.save(verbosity=0) @classmethod def tearDownClass(cls): for tenant in TenantModel.objects.all(): tenant.delete(force_drop=True) def", "by migrating models from empty database. \"\"\" def test_database_checks_with_zero_migrations(self): management.call_command(\"migrate\",", "raise ProgrammingError check_schema_names(apps.get_app_config(\"django_pgschemas\")) management.call_command(\"migrate\", verbosity=0) @tag(\"bug\") class UnappliedMigrationTestCase(TransactionTestCase): \"\"\" Provoke", "# Avoid warnings about model being registered twice with self.assertRaises(CommandError)", "line doesn't raise ProgrammingError check_schema_names(apps.get_app_config(\"django_pgschemas\")) management.call_command(\"migrate\", verbosity=0) @tag(\"bug\") class UnappliedMigrationTestCase(TransactionTestCase):", "verbosity=0) self.assertEqual( str(ctx.exception), \"Error while attempting to retrieve dynamic schemas.", "first?\", ) @tag(\"bug\") class MigrateIgnoringExcludedSchemasTestCase(TransactionTestCase): @classmethod def setUpClass(cls): tenant1 =", "UnappliedMigrationTestCase(TransactionTestCase): \"\"\" Provoke a handled ProgrammingError by running tenant command", "TenantModel.objects.all(): tenant.delete(force_drop=True) @patch(\"django_pgschemas.management.commands.get_tenant_model\", patched_get_tenant_model) def test_whowill_with_pending_migrations(self): with warnings.catch_warnings(): warnings.simplefilter(\"ignore\") #", "\"\"\" @classmethod def setUpClass(cls): tenant1 = TenantModel(schema_name=\"tenant1\") tenant1.save(verbosity=0) @classmethod def", "tenant in TenantModel.objects.all(): tenant.delete(force_drop=True) def test_migrate_with_exclusions(self): # We first unapply", "We first unapply a migration with fake so we can", "import TransactionTestCase, tag from django_pgschemas.checks import check_schema_names from django_pgschemas.models import", "all schemas except for tenant1, THIS IS THE CASE WE", "TO TEST # This should work without errors management.call_command(\"migrate\", all_schemas=True,", "finally apply the migration again with fake # This should", "errors management.call_command(\"migrate\", all_schemas=True, excluded_schemas=[\"tenant1\"], verbosity=0) # If we try to", "for tenant in TenantModel.objects.all(): tenant.delete(force_drop=True) def test_migrate_with_exclusions(self): # We first", "we try 
to global migrate now, we should get a", "\"\"\" Provoke a handled ProgrammingError by running tenant command with", "patched_get_tenant_model(*args, **kwargs): class TenantModel(TenantMixin): dummy = models.TextField() class Meta: app_label", "is that the next line doesn't raise ProgrammingError check_schema_names(apps.get_app_config(\"django_pgschemas\")) management.call_command(\"migrate\",", "management.call_command(\"whowill\", all_schemas=True, verbosity=0) self.assertEqual( str(ctx.exception), \"Error while attempting to retrieve", "import ProgrammingError from django.test import TransactionTestCase, tag from django_pgschemas.checks import", "TenantModel.objects.all(): tenant.delete(force_drop=True) def test_migrate_with_exclusions(self): # We first unapply a migration", "self.assertRaises(CommandError) as ctx: management.call_command(\"whowill\", all_schemas=True, verbosity=0) self.assertEqual( str(ctx.exception), \"Error while", "tearDownClass(cls): for tenant in TenantModel.objects.all(): tenant.delete(force_drop=True) def test_migrate_with_exclusions(self): # We", "reapply it without fake # This should work without errors", "tenant1, THIS IS THE CASE WE WANT TO TEST #", "migration with fake so we can reapply it without fake", "TenantMixin from django_pgschemas.utils import get_tenant_model TenantModel = get_tenant_model() def patched_get_tenant_model(*args,", "@tag(\"bug\") class MigrationZeroRoundTripTestCase(TransactionTestCase): \"\"\" Provoke a handled ProgrammingError by migrating", "TransactionTestCase, tag from django_pgschemas.checks import check_schema_names from django_pgschemas.models import TenantMixin", "warnings about model being registered twice with self.assertRaises(CommandError) as ctx:", "def tearDownClass(cls): for tenant in TenantModel.objects.all(): tenant.delete(force_drop=True) def test_migrate_with_exclusions(self): #", "management.call_command(\"migrate\", \"app_tenants\", \"0001_initial\", fake=True, schemas=[\"tenant1\"], verbosity=0) # We then migrate", "WANT TO TEST # This should work without errors management.call_command(\"migrate\",", "the 'public' schema first?\", ) @tag(\"bug\") class MigrateIgnoringExcludedSchemasTestCase(TransactionTestCase): @classmethod def", "running tenant command with pending model changes. \"\"\" @classmethod def", "tenant command with pending model changes. 
\"\"\" @classmethod def setUpClass(cls):", "CASE WE WANT TO TEST # This should work without", "import CommandError from django.db import models from django.db.utils import ProgrammingError", "class TenantModel(TenantMixin): dummy = models.TextField() class Meta: app_label = get_tenant_model()._meta.app_label", "import get_tenant_model TenantModel = get_tenant_model() def patched_get_tenant_model(*args, **kwargs): class TenantModel(TenantMixin):", "we can reapply it without fake # This should work", "global migrate now, we should get a ProgrammingError with self.assertRaises(ProgrammingError):", "schema first?\", ) @tag(\"bug\") class MigrateIgnoringExcludedSchemasTestCase(TransactionTestCase): @classmethod def setUpClass(cls): tenant1", "from unittest.mock import patch from django.apps import apps from django.core", "handled ProgrammingError by running tenant command with pending model changes.", "\"0001_initial\", fake=True, schemas=[\"tenant1\"], verbosity=0) # We then migrate on all", "again with fake # This should work without errors management.call_command(\"migrate\",", "without errors management.call_command(\"migrate\", \"app_tenants\", \"0001_initial\", fake=True, schemas=[\"tenant1\"], verbosity=0) # We", "models from django.db.utils import ProgrammingError from django.test import TransactionTestCase, tag", "@patch(\"django_pgschemas.management.commands.get_tenant_model\", patched_get_tenant_model) def test_whowill_with_pending_migrations(self): with warnings.catch_warnings(): warnings.simplefilter(\"ignore\") # Avoid warnings", "ProgrammingError with self.assertRaises(ProgrammingError): management.call_command(\"migrate\", all_schemas=True, verbosity=0) # We finally apply", "unittest.mock import patch from django.apps import apps from django.core import", "def setUpClass(cls): tenant1 = TenantModel(schema_name=\"tenant1\") tenant1.save(verbosity=0) @classmethod def tearDownClass(cls): for", "with self.assertRaises(CommandError) as ctx: management.call_command(\"whowill\", all_schemas=True, verbosity=0) self.assertEqual( str(ctx.exception), \"Error", "by running tenant command with pending model changes. 
\"\"\" @classmethod", "class MigrateIgnoringExcludedSchemasTestCase(TransactionTestCase): @classmethod def setUpClass(cls): tenant1 = TenantModel(schema_name=\"tenant1\") tenant1.save(verbosity=0) @classmethod", "check_schema_names from django_pgschemas.models import TenantMixin from django_pgschemas.utils import get_tenant_model TenantModel", "verbosity=0) # We finally apply the migration again with fake", "tag from django_pgschemas.checks import check_schema_names from django_pgschemas.models import TenantMixin from", "TEST # This should work without errors management.call_command(\"migrate\", all_schemas=True, excluded_schemas=[\"tenant1\"],", "registered twice with self.assertRaises(CommandError) as ctx: management.call_command(\"whowill\", all_schemas=True, verbosity=0) self.assertEqual(", "# This should work without errors management.call_command(\"migrate\", \"app_tenants\", \"0001_initial\", fake=True,", "This should work without errors management.call_command(\"migrate\", all_schemas=True, excluded_schemas=[\"tenant1\"], verbosity=0) #", "def test_database_checks_with_zero_migrations(self): management.call_command(\"migrate\", \"shared_public\", \"zero\", verbosity=0) # The goal is", "tenant1 = TenantModel(schema_name=\"tenant1\") tenant1.save(verbosity=0) @classmethod def tearDownClass(cls): for tenant in", "migrate the 'public' schema first?\", ) @tag(\"bug\") class MigrateIgnoringExcludedSchemasTestCase(TransactionTestCase): @classmethod", "the migration again with fake # This should work without", "handled ProgrammingError by migrating models from empty database. \"\"\" def", "@tag(\"bug\") class UnappliedMigrationTestCase(TransactionTestCase): \"\"\" Provoke a handled ProgrammingError by running", "from django.db.utils import ProgrammingError from django.test import TransactionTestCase, tag from", "management from django.core.management.base import CommandError from django.db import models from", "django.db.utils import ProgrammingError from django.test import TransactionTestCase, tag from django_pgschemas.checks", "= TenantModel(schema_name=\"tenant1\") tenant1.save(verbosity=0) @classmethod def tearDownClass(cls): for tenant in TenantModel.objects.all():", "import models from django.db.utils import ProgrammingError from django.test import TransactionTestCase,", "class MigrationZeroRoundTripTestCase(TransactionTestCase): \"\"\" Provoke a handled ProgrammingError by migrating models", "tenant1.save(verbosity=0) @classmethod def tearDownClass(cls): for tenant in TenantModel.objects.all(): tenant.delete(force_drop=True) @patch(\"django_pgschemas.management.commands.get_tenant_model\",", "django.db import models from django.db.utils import ProgrammingError from django.test import", "# This should work without errors management.call_command(\"migrate\", all_schemas=True, excluded_schemas=[\"tenant1\"], verbosity=0)", "WE WANT TO TEST # This should work without errors", "# We then migrate on all schemas except for tenant1,", "to global migrate now, we should get a ProgrammingError with", "CommandError from django.db import models from django.db.utils import ProgrammingError from", "with fake # This should work without errors management.call_command(\"migrate\", fake=True,", "changes. 
\"\"\" @classmethod def setUpClass(cls): tenant1 = TenantModel(schema_name=\"tenant1\") tenant1.save(verbosity=0) @classmethod", "import warnings from unittest.mock import patch from django.apps import apps", "from django.core import management from django.core.management.base import CommandError from django.db", "from empty database. \"\"\" def test_database_checks_with_zero_migrations(self): management.call_command(\"migrate\", \"shared_public\", \"zero\", verbosity=0)", "def test_whowill_with_pending_migrations(self): with warnings.catch_warnings(): warnings.simplefilter(\"ignore\") # Avoid warnings about model", "warnings from unittest.mock import patch from django.apps import apps from", "about model being registered twice with self.assertRaises(CommandError) as ctx: management.call_command(\"whowill\",", "for tenant1, THIS IS THE CASE WE WANT TO TEST", "THIS IS THE CASE WE WANT TO TEST # This", "management.call_command(\"migrate\", all_schemas=True, excluded_schemas=[\"tenant1\"], verbosity=0) # If we try to global", "twice with self.assertRaises(CommandError) as ctx: management.call_command(\"whowill\", all_schemas=True, verbosity=0) self.assertEqual( str(ctx.exception),", "all_schemas=True, verbosity=0) self.assertEqual( str(ctx.exception), \"Error while attempting to retrieve dynamic", "a handled ProgrammingError by migrating models from empty database. \"\"\"", "django.core.management.base import CommandError from django.db import models from django.db.utils import", "def tearDownClass(cls): for tenant in TenantModel.objects.all(): tenant.delete(force_drop=True) @patch(\"django_pgschemas.management.commands.get_tenant_model\", patched_get_tenant_model) def", "Provoke a handled ProgrammingError by running tenant command with pending", "MigrateIgnoringExcludedSchemasTestCase(TransactionTestCase): @classmethod def setUpClass(cls): tenant1 = TenantModel(schema_name=\"tenant1\") tenant1.save(verbosity=0) @classmethod def", "# We finally apply the migration again with fake #", "# We first unapply a migration with fake so we", "model being registered twice with self.assertRaises(CommandError) as ctx: management.call_command(\"whowill\", all_schemas=True,", "**kwargs): class TenantModel(TenantMixin): dummy = models.TextField() class Meta: app_label =", "from django.apps import apps from django.core import management from django.core.management.base", "def patched_get_tenant_model(*args, **kwargs): class TenantModel(TenantMixin): dummy = models.TextField() class Meta:", "empty database. 
\"\"\" def test_database_checks_with_zero_migrations(self): management.call_command(\"migrate\", \"shared_public\", \"zero\", verbosity=0) #", "that the next line doesn't raise ProgrammingError check_schema_names(apps.get_app_config(\"django_pgschemas\")) management.call_command(\"migrate\", verbosity=0)", "goal is that the next line doesn't raise ProgrammingError check_schema_names(apps.get_app_config(\"django_pgschemas\"))", "django_pgschemas.models import TenantMixin from django_pgschemas.utils import get_tenant_model TenantModel = get_tenant_model()", "dummy = models.TextField() class Meta: app_label = get_tenant_model()._meta.app_label return TenantModel", "\"shared_public\", \"zero\", verbosity=0) # The goal is that the next", "a handled ProgrammingError by running tenant command with pending model", "patched_get_tenant_model) def test_whowill_with_pending_migrations(self): with warnings.catch_warnings(): warnings.simplefilter(\"ignore\") # Avoid warnings about", "from django_pgschemas.checks import check_schema_names from django_pgschemas.models import TenantMixin from django_pgschemas.utils", "ProgrammingError by migrating models from empty database. \"\"\" def test_database_checks_with_zero_migrations(self):", "test_migrate_with_exclusions(self): # We first unapply a migration with fake so", "a migration with fake so we can reapply it without", "errors management.call_command(\"migrate\", \"app_tenants\", \"0001_initial\", fake=True, schemas=[\"tenant1\"], verbosity=0) # We then", "import TenantMixin from django_pgschemas.utils import get_tenant_model TenantModel = get_tenant_model() def", "apply the migration again with fake # This should work", "class UnappliedMigrationTestCase(TransactionTestCase): \"\"\" Provoke a handled ProgrammingError by running tenant", "\"app_tenants\", \"0001_initial\", fake=True, schemas=[\"tenant1\"], verbosity=0) # We then migrate on", "\"\"\" def test_database_checks_with_zero_migrations(self): management.call_command(\"migrate\", \"shared_public\", \"zero\", verbosity=0) # The goal", "with warnings.catch_warnings(): warnings.simplefilter(\"ignore\") # Avoid warnings about model being registered", "django_pgschemas.utils import get_tenant_model TenantModel = get_tenant_model() def patched_get_tenant_model(*args, **kwargs): class", "unapply a migration with fake so we can reapply it", "apps from django.core import management from django.core.management.base import CommandError from", "from django.core.management.base import CommandError from django.db import models from django.db.utils", "ProgrammingError check_schema_names(apps.get_app_config(\"django_pgschemas\")) management.call_command(\"migrate\", verbosity=0) @tag(\"bug\") class UnappliedMigrationTestCase(TransactionTestCase): \"\"\" Provoke a", "tenant in TenantModel.objects.all(): tenant.delete(force_drop=True) @patch(\"django_pgschemas.management.commands.get_tenant_model\", patched_get_tenant_model) def test_whowill_with_pending_migrations(self): with warnings.catch_warnings():", "first unapply a migration with fake so we can reapply" ]
[ "component to transform the input examples. The Transform component wraps", "Necessary iff multiple transform components are declared in the same", "the preprocessing_fn from input module file, preprocess both 'train' and", "schema: A Channel of type `standard_artifacts.Schema`. This should contain a", "is deprecated. Please update your ' 'usage as support for", "The file path to a python module file, from which", "2.0 (the \"License\"); # you may not use this file", "the Transform component has ' 'been renamed to \"examples\" and", "preprocessing_fn from input module file, preprocess both 'train' and 'eval'", "absolute_import from __future__ import division from __future__ import print_function from", "and is deprecated. Please update your ' 'usage as support", "the input examples. The Transform component wraps TensorFlow Transform (tf.Transform)", "== bool(preprocessing_fn): raise ValueError( \"Exactly one of 'module_file' or 'preprocessing_fn'", "' 'been renamed to \"examples\" and is deprecated. Please update", "Optional[types.Channel] = None, transformed_examples: Optional[types.Channel] = None, input_data: Optional[types.Channel] =", "output 'TransformPath' channel for output of 'tf.Transform', which includes an", "an exported Tensorflow graph suitable for both training and serving;", "the License. \"\"\"TFX Transform component definition.\"\"\" from __future__ import absolute_import", "If not specified, defaults to the value specified for pipeline's", "type=standard_artifacts.Examples, artifacts=[example_artifact]) spec = TransformSpec( examples=examples, schema=schema, module_file=module_file, preprocessing_fn=preprocessing_fn, transform_graph=transform_graph,", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "from input module file, preprocess both 'train' and 'eval' splits", "the function. Exactly one of 'module_file' or 'preprocessing_fn' must be", "exported Tensorflow graph suitable for both training and serving; transformed_examples:", "bool(preprocessing_fn): raise ValueError( \"Exactly one of 'module_file' or 'preprocessing_fn' must", "Copyright 2019 Google LLC. All Rights Reserved. # # Licensed", "will be removed soon.') examples = input_data if bool(module_file) ==", "python module file, from which the 'preprocessing_fn' function will be", "Text, Union import absl from tfx import types from tfx.components.base", "preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]: ... where the values", "the 'preprocessing_fn' function will be loaded. The function must have", "be supplied. transform_graph: Optional output 'TransformPath' channel for output of", "transformed_examples: Optional output 'ExamplesPath' channel for materialized transformed examples, which", "LLC. All Rights Reserved. # # Licensed under the Apache", "declared in the same pipeline. enable_cache: Optional boolean to indicate", "support for this argument will be removed soon.') examples =", "import TransformSpec class Transform(base_component.BaseComponent): \"\"\"A TFX component to transform the", "``` Please see https://www.tensorflow.org/tfx/transform for more details. \"\"\" SPEC_CLASS =", "provided in the `module_file` file to train the model. The", "python2, python3 # Copyright 2019 Google LLC. All Rights Reserved.", "use this file except in compliance with the License. #", "have the following signature. def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text,", "includes both 'train' and 'eval' splits. 
input_data: Backwards compatibility alias", "is enabled for the Transform component. If not specified, defaults", "'module_file' or 'preprocessing_fn' must be supplied. transform_graph: Optional output 'TransformPath'", "import base_component from tfx.components.base import executor_spec from tfx.components.transform import executor", "Optional unique instance name. Necessary iff multiple transform components are", "be loaded. The function must have the following signature. def", "transform_graph: Optional output 'TransformPath' channel for output of 'tf.Transform', which", "import data_types from tfx.types import artifact from tfx.types import artifact_utils", "'examples' argument. instance_name: Optional unique instance name. Necessary iff multiple", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "License. # You may obtain a copy of the License", "= None, transform_graph: Optional[types.Channel] = None, transformed_examples: Optional[types.Channel] = None,", "path to a python module file, from which the 'preprocessing_fn'", "transformed_examples = types.Channel( type=standard_artifacts.Examples, artifacts=[example_artifact]) spec = TransformSpec( examples=examples, schema=schema,", "a single schema artifact. module_file: The file path to a", "that file. An example of `preprocessing_fn()` can be found in", "`preprocessing_fn()` can be found in the [user-supplied code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)) of the", "of the TFX Chicago Taxi pipeline example. ## Example ```", "serving. transform = Transform( examples=example_gen.outputs['examples'], schema=infer_schema.outputs['schema'], module_file=module_file) ``` Please see", "[user-supplied code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)) of the TFX Chicago Taxi pipeline example. ##", "signature of the function. Exactly one of 'module_file' or 'preprocessing_fn'", "under the License is distributed on an \"AS IS\" BASIS,", "preprocessing_fn: Optional[Union[Text, data_types.RuntimeParameter]] = None, transform_graph: Optional[types.Channel] = None, transformed_examples:", "'been renamed to \"examples\" and is deprecated. Please update your", "preprocessing_fn: The path to python function that implements a 'preprocessing_fn'.", "\"\"\"TFX Transform component definition.\"\"\" from __future__ import absolute_import from __future__", "expected signature of the function. Exactly one of 'module_file' or", "License for the specific language governing permissions and # limitations", "argument to the Transform component has ' 'been renamed to", "types.Channel( type=standard_artifacts.TransformGraph, artifacts=[standard_artifacts.TransformGraph()]) if not transformed_examples: example_artifact = standard_artifacts.Examples() example_artifact.split_names", "to indicate if cache is enabled for the Transform component.", "of 'module_file' and 'preprocessing_fn' is supplied. \"\"\" if input_data: absl.logging.warning(", "python3 # Copyright 2019 Google LLC. All Rights Reserved. #", "Reserved. # # Licensed under the Apache License, Version 2.0", "'tf.Transform', which includes an exported Tensorflow graph suitable for both", "standard_artifacts.Examples() example_artifact.split_names = artifact_utils.encode_split_names( artifact.DEFAULT_EXAMPLE_SPLITS) transformed_examples = types.Channel( type=standard_artifacts.Examples, artifacts=[example_artifact])", "Transform component. 
Args: examples: A Channel of type `standard_artifacts.Examples` (required).", "data_types.RuntimeParameter]] = None, preprocessing_fn: Optional[Union[Text, data_types.RuntimeParameter]] = None, transform_graph: Optional[types.Channel]", "Optional boolean to indicate if cache is enabled for the", "of 'module_file' or 'preprocessing_fn' must be supplied.\" ) transform_graph =", "or 'preprocessing_fn' must be supplied. preprocessing_fn: The path to python", "'preprocessing_fn' is supplied. \"\"\" if input_data: absl.logging.warning( 'The \"input_data\" argument", "generate the `tf.Transform` output, and save both transform function and", "= None, instance_name: Optional[Text] = None, enable_cache: Optional[bool] = None):", "tfx.components.transform import executor from tfx.orchestration import data_types from tfx.types import", "License. \"\"\"TFX Transform component definition.\"\"\" from __future__ import absolute_import from", "and 'eval'. schema: A Channel of type `standard_artifacts.Schema`. This should", "TFX pipeline. This component will load the preprocessing_fn from input", "not specified, defaults to the value specified for pipeline's enable_cache", "training and serving. transform = Transform( examples=example_gen.outputs['examples'], schema=infer_schema.outputs['schema'], module_file=module_file) ```", "from __future__ import absolute_import from __future__ import division from __future__", "data in a TFX pipeline. This component will load the", "\"\"\"Construct a Transform component. Args: examples: A Channel of type", "in compliance with the License. # You may obtain a", "software # distributed under the License is distributed on an", "splits. input_data: Backwards compatibility alias for the 'examples' argument. instance_name:", "your ' 'usage as support for this argument will be", "or 'preprocessing_fn' must be supplied. transform_graph: Optional output 'TransformPath' channel", "the model. The Transform executor will look specifically for the", "following signature. def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]: ...", "# Lint as: python2, python3 # Copyright 2019 Google LLC.", "= None, input_data: Optional[types.Channel] = None, instance_name: Optional[Text] = None,", "compatibility alias for the 'examples' argument. instance_name: Optional unique instance", "\"Exactly one of 'module_file' or 'preprocessing_fn' must be supplied.\" )", "of 'tf.Transform', which includes an exported Tensorflow graph suitable for", "not transformed_examples: example_artifact = standard_artifacts.Examples() example_artifact.split_names = artifact_utils.encode_split_names( artifact.DEFAULT_EXAMPLE_SPLITS) transformed_examples", "'eval' splits. input_data: Backwards compatibility alias for the 'examples' argument.", "a 'preprocessing_fn'. See 'module_file' for expected signature of the function.", "preprocess data in a TFX pipeline. This component will load", "name. Necessary iff multiple transform components are declared in the", "None, preprocessing_fn: Optional[Union[Text, data_types.RuntimeParameter]] = None, transform_graph: Optional[types.Channel] = None,", "transformed_examples: Optional[types.Channel] = None, input_data: Optional[types.Channel] = None, instance_name: Optional[Text]", "renamed to \"examples\" and is deprecated. Please update your '", "input examples. The Transform component wraps TensorFlow Transform (tf.Transform) to", "channel for materialized transformed examples, which includes both 'train' and", "the Transform component. 
If not specified, defaults to the value", "artifact.DEFAULT_EXAMPLE_SPLITS) transformed_examples = types.Channel( type=standard_artifacts.Examples, artifacts=[example_artifact]) spec = TransformSpec( examples=examples,", "# limitations under the License. \"\"\"TFX Transform component definition.\"\"\" from", "None, module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None, preprocessing_fn: Optional[Union[Text, data_types.RuntimeParameter]] =", "Tensorflow graph suitable for both training and serving; transformed_examples: Optional", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "where the values of input and returned Dict are either", "executor_spec.ExecutorClassSpec(executor.Executor) def __init__( self, examples: types.Channel = None, schema: types.Channel", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "if input_data: absl.logging.warning( 'The \"input_data\" argument to the Transform component", "= Transform( examples=example_gen.outputs['examples'], schema=infer_schema.outputs['schema'], module_file=module_file) ``` Please see https://www.tensorflow.org/tfx/transform for", "absl.logging.warning( 'The \"input_data\" argument to the Transform component has '", ") transform_graph = transform_graph or types.Channel( type=standard_artifacts.TransformGraph, artifacts=[standard_artifacts.TransformGraph()]) if not", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "None, instance_name: Optional[Text] = None, enable_cache: Optional[bool] = None): \"\"\"Construct", "to in writing, software # distributed under the License is", "will use the estimator provided in the `module_file` file to", "the values of input and returned Dict are either tf.Tensor", "Lint as: python2, python3 # Copyright 2019 Google LLC. All", "transform components are declared in the same pipeline. enable_cache: Optional", "`standard_artifacts.Schema`. This should contain a single schema artifact. module_file: The", "types.Channel( type=standard_artifacts.Examples, artifacts=[example_artifact]) spec = TransformSpec( examples=examples, schema=schema, module_file=module_file, preprocessing_fn=preprocessing_fn,", "# See the License for the specific language governing permissions", "input module file, preprocess both 'train' and 'eval' splits of", "limitations under the License. \"\"\"TFX Transform component definition.\"\"\" from __future__", "transform = Transform( examples=example_gen.outputs['examples'], schema=infer_schema.outputs['schema'], module_file=module_file) ``` Please see https://www.tensorflow.org/tfx/transform", "Optional[Text] = None, enable_cache: Optional[bool] = None): \"\"\"Construct a Transform", "Exactly one of 'module_file' or 'preprocessing_fn' must be supplied. preprocessing_fn:", "input_data: Optional[types.Channel] = None, instance_name: Optional[Text] = None, enable_cache: Optional[bool]", "supplied. 
transform_graph: Optional output 'TransformPath' channel for output of 'tf.Transform',", "or agreed to in writing, software # distributed under the", "from __future__ import division from __future__ import print_function from typing", "tfx.types import standard_artifacts from tfx.types.standard_component_specs import TransformSpec class Transform(base_component.BaseComponent): \"\"\"A", "if not transformed_examples: example_artifact = standard_artifacts.Examples() example_artifact.split_names = artifact_utils.encode_split_names( artifact.DEFAULT_EXAMPLE_SPLITS)", "required by applicable law or agreed to in writing, software", "as support for this argument will be removed soon.') examples", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "should contain a single schema artifact. module_file: The file path", "is supplied. \"\"\" if input_data: absl.logging.warning( 'The \"input_data\" argument to", "and 'preprocessing_fn' is supplied. \"\"\" if input_data: absl.logging.warning( 'The \"input_data\"", "with the License. # You may obtain a copy of", "function that implements a 'preprocessing_fn'. See 'module_file' for expected signature", "has ' 'been renamed to \"examples\" and is deprecated. Please", "Taxi pipeline example. ## Example ``` # Performs transformations and", "None, input_data: Optional[types.Channel] = None, instance_name: Optional[Text] = None, enable_cache:", "\"\"\" if input_data: absl.logging.warning( 'The \"input_data\" argument to the Transform", "An example of `preprocessing_fn()` can be found in the [user-supplied", "estimator provided in the `module_file` file to train the model.", "EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor) def __init__( self, examples: types.Channel = None,", "tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn' must be supplied.", "artifact_utils.encode_split_names( artifact.DEFAULT_EXAMPLE_SPLITS) transformed_examples = types.Channel( type=standard_artifacts.Examples, artifacts=[example_artifact]) spec = TransformSpec(", "splits of input examples, generate the `tf.Transform` output, and save", "within that file. An example of `preprocessing_fn()` can be found", "component. Args: examples: A Channel of type `standard_artifacts.Examples` (required). This", "and returned Dict are either tf.Tensor or tf.SparseTensor. Exactly one", "more details. \"\"\" SPEC_CLASS = TransformSpec EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor) def", "compliance with the License. # You may obtain a copy", "All Rights Reserved. # # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "examples=example_gen.outputs['examples'], schema=infer_schema.outputs['schema'], module_file=module_file) ``` Please see https://www.tensorflow.org/tfx/transform for more details.", "one of 'module_file' or 'preprocessing_fn' must be supplied. transform_graph: Optional", "Args: examples: A Channel of type `standard_artifacts.Examples` (required). This should", "Transform(base_component.BaseComponent): \"\"\"A TFX component to transform the input examples. The", "loaded. The function must have the following signature. 
def preprocessing_fn(inputs:", "ValueError( \"Exactly one of 'module_file' or 'preprocessing_fn' must be supplied.\"", "distributed under the License is distributed on an \"AS IS\"", "can be found in the [user-supplied code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)) of the TFX", "function within that file. An example of `preprocessing_fn()` can be", "the two splits 'train' and 'eval'. schema: A Channel of", "implements a 'preprocessing_fn'. See 'module_file' for expected signature of the", "bool(module_file) == bool(preprocessing_fn): raise ValueError( \"Exactly one of 'module_file' or", "the same pipeline. enable_cache: Optional boolean to indicate if cache", "__init__( self, examples: types.Channel = None, schema: types.Channel = None,", "are declared in the same pipeline. enable_cache: Optional boolean to", "will be loaded. The function must have the following signature.", "are either tf.Tensor or tf.SparseTensor. Exactly one of 'module_file' or", "https://www.tensorflow.org/tfx/transform for more details. \"\"\" SPEC_CLASS = TransformSpec EXECUTOR_SPEC =", "express or implied. # See the License for the specific", "supplied. preprocessing_fn: The path to python function that implements a", "except in compliance with the License. # You may obtain", "Transform( examples=example_gen.outputs['examples'], schema=infer_schema.outputs['schema'], module_file=module_file) ``` Please see https://www.tensorflow.org/tfx/transform for more", "' 'usage as support for this argument will be removed", "input_data if bool(module_file) == bool(preprocessing_fn): raise ValueError( \"Exactly one of", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "must be supplied. preprocessing_fn: The path to python function that", "boolean to indicate if cache is enabled for the Transform", "\"\"\"A TFX component to transform the input examples. The Transform", "= artifact_utils.encode_split_names( artifact.DEFAULT_EXAMPLE_SPLITS) transformed_examples = types.Channel( type=standard_artifacts.Examples, artifacts=[example_artifact]) spec =", "tfx.orchestration import data_types from tfx.types import artifact from tfx.types import", "in training and serving. transform = Transform( examples=example_gen.outputs['examples'], schema=infer_schema.outputs['schema'], module_file=module_file)", "writing, software # distributed under the License is distributed on", "from tfx.types.standard_component_specs import TransformSpec class Transform(base_component.BaseComponent): \"\"\"A TFX component to", "for expected signature of the function. Exactly one of 'module_file'", "you may not use this file except in compliance with", "... where the values of input and returned Dict are", "be supplied.\" ) transform_graph = transform_graph or types.Channel( type=standard_artifacts.TransformGraph, artifacts=[standard_artifacts.TransformGraph()])", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "pipeline. This component will load the preprocessing_fn from input module", "'train' and 'eval' splits of input examples, generate the `tf.Transform`", "artifact from tfx.types import artifact_utils from tfx.types import standard_artifacts from", "executor will use the estimator provided in the `module_file` file", "two splits 'train' and 'eval'. schema: A Channel of type", "This component will load the preprocessing_fn from input module file,", "parameter. 
Raises: ValueError: When both or neither of 'module_file' and", "CONDITIONS OF ANY KIND, either express or implied. # See", "data_types from tfx.types import artifact from tfx.types import artifact_utils from", "orchestrator desired locations. ## Providing a preprocessing function The TFX", "single schema artifact. module_file: The file path to a python", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "a TFX pipeline. This component will load the preprocessing_fn from", "materialized transformed examples, which includes both 'train' and 'eval' splits.", "look specifically for the `preprocessing_fn()` function within that file. An", "schema=schema, module_file=module_file, preprocessing_fn=preprocessing_fn, transform_graph=transform_graph, transformed_examples=transformed_examples) super(Transform, self).__init__( spec=spec, instance_name=instance_name, enable_cache=enable_cache)", "import division from __future__ import print_function from typing import Optional,", "from which the 'preprocessing_fn' function will be loaded. The function", "from tfx.components.base import executor_spec from tfx.components.transform import executor from tfx.orchestration", "class Transform(base_component.BaseComponent): \"\"\"A TFX component to transform the input examples.", "'module_file' or 'preprocessing_fn' must be supplied. preprocessing_fn: The path to", "the `module_file` file to train the model. The Transform executor", "base_component from tfx.components.base import executor_spec from tfx.components.transform import executor from", "must be supplied. transform_graph: Optional output 'TransformPath' channel for output", "Optional[bool] = None): \"\"\"Construct a Transform component. Args: examples: A", "values of input and returned Dict are either tf.Tensor or", "## Providing a preprocessing function The TFX executor will use", "pipeline's enable_cache parameter. Raises: ValueError: When both or neither of", "argument will be removed soon.') examples = input_data if bool(module_file)", "artifact_utils from tfx.types import standard_artifacts from tfx.types.standard_component_specs import TransformSpec class", "the estimator provided in the `module_file` file to train the", "removed soon.') examples = input_data if bool(module_file) == bool(preprocessing_fn): raise", "to a python module file, from which the 'preprocessing_fn' function", "graph suitable for both training and serving; transformed_examples: Optional output", "wraps TensorFlow Transform (tf.Transform) to preprocess data in a TFX", "import Optional, Text, Union import absl from tfx import types", "iff multiple transform components are declared in the same pipeline.", "permissions and # limitations under the License. \"\"\"TFX Transform component", "= None, preprocessing_fn: Optional[Union[Text, data_types.RuntimeParameter]] = None, transform_graph: Optional[types.Channel] =", "use the estimator provided in the `module_file` file to train", "'The \"input_data\" argument to the Transform component has ' 'been", "OR CONDITIONS OF ANY KIND, either express or implied. #", "Transform executor will look specifically for the `preprocessing_fn()` function within", "examples=examples, schema=schema, module_file=module_file, preprocessing_fn=preprocessing_fn, transform_graph=transform_graph, transformed_examples=transformed_examples) super(Transform, self).__init__( spec=spec, instance_name=instance_name,", "both 'train' and 'eval' splits. 
input_data: Backwards compatibility alias for", "transform function and transformed examples to orchestrator desired locations. ##", "the License is distributed on an \"AS IS\" BASIS, #", "__future__ import print_function from typing import Optional, Text, Union import", "import artifact_utils from tfx.types import standard_artifacts from tfx.types.standard_component_specs import TransformSpec", "``` # Performs transformations and feature engineering in training and", "or 'preprocessing_fn' must be supplied.\" ) transform_graph = transform_graph or", "tfx.components.base import executor_spec from tfx.components.transform import executor from tfx.orchestration import", "`tf.Transform` output, and save both transform function and transformed examples", "Optional, Text, Union import absl from tfx import types from", "alias for the 'examples' argument. instance_name: Optional unique instance name.", "argument. instance_name: Optional unique instance name. Necessary iff multiple transform", "both or neither of 'module_file' and 'preprocessing_fn' is supplied. \"\"\"", "'module_file' and 'preprocessing_fn' is supplied. \"\"\" if input_data: absl.logging.warning( 'The", "TFX executor will use the estimator provided in the `module_file`", "`module_file` file to train the model. The Transform executor will", "multiple transform components are declared in the same pipeline. enable_cache:", "'preprocessing_fn' must be supplied. transform_graph: Optional output 'TransformPath' channel for", "= None): \"\"\"Construct a Transform component. Args: examples: A Channel", "A Channel of type `standard_artifacts.Schema`. This should contain a single", "types from tfx.components.base import base_component from tfx.components.base import executor_spec from", "Any]: ... where the values of input and returned Dict", "to the value specified for pipeline's enable_cache parameter. Raises: ValueError:", "law or agreed to in writing, software # distributed under", "model. The Transform executor will look specifically for the `preprocessing_fn()`", "and serving. transform = Transform( examples=example_gen.outputs['examples'], schema=infer_schema.outputs['schema'], module_file=module_file) ``` Please", "import artifact from tfx.types import artifact_utils from tfx.types import standard_artifacts", "None, enable_cache: Optional[bool] = None): \"\"\"Construct a Transform component. Args:", "Dict are either tf.Tensor or tf.SparseTensor. Exactly one of 'module_file'", "update your ' 'usage as support for this argument will", "returned Dict are either tf.Tensor or tf.SparseTensor. Exactly one of", "for more details. \"\"\" SPEC_CLASS = TransformSpec EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)", "import executor_spec from tfx.components.transform import executor from tfx.orchestration import data_types", "output 'ExamplesPath' channel for materialized transformed examples, which includes both", "schema artifact. module_file: The file path to a python module", "import standard_artifacts from tfx.types.standard_component_specs import TransformSpec class Transform(base_component.BaseComponent): \"\"\"A TFX", "transform_graph: Optional[types.Channel] = None, transformed_examples: Optional[types.Channel] = None, input_data: Optional[types.Channel]", "This should contain a single schema artifact. module_file: The file", "indicate if cache is enabled for the Transform component. If", "components are declared in the same pipeline. 
enable_cache: Optional boolean", "will load the preprocessing_fn from input module file, preprocess both", "function will be loaded. The function must have the following", "this argument will be removed soon.') examples = input_data if", "be supplied. preprocessing_fn: The path to python function that implements", "TFX component to transform the input examples. The Transform component", "channel for output of 'tf.Transform', which includes an exported Tensorflow", "the 'examples' argument. instance_name: Optional unique instance name. Necessary iff", "The Transform executor will look specifically for the `preprocessing_fn()` function", "may obtain a copy of the License at # #", "to \"examples\" and is deprecated. Please update your ' 'usage", "examples to orchestrator desired locations. ## Providing a preprocessing function", "either tf.Tensor or tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn'", "Exactly one of 'module_file' or 'preprocessing_fn' must be supplied. transform_graph:", "language governing permissions and # limitations under the License. \"\"\"TFX", "that implements a 'preprocessing_fn'. See 'module_file' for expected signature of", "example_artifact = standard_artifacts.Examples() example_artifact.split_names = artifact_utils.encode_split_names( artifact.DEFAULT_EXAMPLE_SPLITS) transformed_examples = types.Channel(", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "examples: types.Channel = None, schema: types.Channel = None, module_file: Optional[Union[Text,", "details. \"\"\" SPEC_CLASS = TransformSpec EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor) def __init__(", "a python module file, from which the 'preprocessing_fn' function will", "for the 'examples' argument. instance_name: Optional unique instance name. Necessary", "may not use this file except in compliance with the", "Chicago Taxi pipeline example. ## Example ``` # Performs transformations", "path to python function that implements a 'preprocessing_fn'. See 'module_file'", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "from typing import Optional, Text, Union import absl from tfx", "executor from tfx.orchestration import data_types from tfx.types import artifact from", "Backwards compatibility alias for the 'examples' argument. instance_name: Optional unique", "this file except in compliance with the License. # You", "division from __future__ import print_function from typing import Optional, Text,", "cache is enabled for the Transform component. If not specified,", "input examples, generate the `tf.Transform` output, and save both transform", "import absolute_import from __future__ import division from __future__ import print_function", "file. An example of `preprocessing_fn()` can be found in the", "schema: types.Channel = None, module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None, preprocessing_fn:", "if cache is enabled for the Transform component. If not", "tf.Tensor or tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn' must", "input_data: absl.logging.warning( 'The \"input_data\" argument to the Transform component has", "load the preprocessing_fn from input module file, preprocess both 'train'", "Transform component. If not specified, defaults to the value specified", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "to preprocess data in a TFX pipeline. 
This component will", "in the [user-supplied code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)) of the TFX Chicago Taxi pipeline", "locations. ## Providing a preprocessing function The TFX executor will", "# # Licensed under the Apache License, Version 2.0 (the", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "artifacts=[example_artifact]) spec = TransformSpec( examples=examples, schema=schema, module_file=module_file, preprocessing_fn=preprocessing_fn, transform_graph=transform_graph, transformed_examples=transformed_examples)", "example. ## Example ``` # Performs transformations and feature engineering", "The Transform component wraps TensorFlow Transform (tf.Transform) to preprocess data", "\"\"\" SPEC_CLASS = TransformSpec EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor) def __init__( self,", "example_artifact.split_names = artifact_utils.encode_split_names( artifact.DEFAULT_EXAMPLE_SPLITS) transformed_examples = types.Channel( type=standard_artifacts.Examples, artifacts=[example_artifact]) spec", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "defaults to the value specified for pipeline's enable_cache parameter. Raises:", "or neither of 'module_file' and 'preprocessing_fn' is supplied. \"\"\" if", "Performs transformations and feature engineering in training and serving. transform", "one of 'module_file' or 'preprocessing_fn' must be supplied.\" ) transform_graph", "must be supplied.\" ) transform_graph = transform_graph or types.Channel( type=standard_artifacts.TransformGraph,", "Dict[Text, Any]) -> Dict[Text, Any]: ... where the values of", "of input and returned Dict are either tf.Tensor or tf.SparseTensor.", "governing permissions and # limitations under the License. \"\"\"TFX Transform", "same pipeline. enable_cache: Optional boolean to indicate if cache is", "of type `standard_artifacts.Schema`. This should contain a single schema artifact.", "supplied.\" ) transform_graph = transform_graph or types.Channel( type=standard_artifacts.TransformGraph, artifacts=[standard_artifacts.TransformGraph()]) if", "executor will look specifically for the `preprocessing_fn()` function within that", "desired locations. ## Providing a preprocessing function The TFX executor", "'module_file' or 'preprocessing_fn' must be supplied.\" ) transform_graph = transform_graph", "must have the following signature. def preprocessing_fn(inputs: Dict[Text, Any]) ->", "= None, module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None, preprocessing_fn: Optional[Union[Text, data_types.RuntimeParameter]]", "pipeline. enable_cache: Optional boolean to indicate if cache is enabled", "component will load the preprocessing_fn from input module file, preprocess", "both training and serving; transformed_examples: Optional output 'ExamplesPath' channel for", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "TransformSpec( examples=examples, schema=schema, module_file=module_file, preprocessing_fn=preprocessing_fn, transform_graph=transform_graph, transformed_examples=transformed_examples) super(Transform, self).__init__( spec=spec,", "\"input_data\" argument to the Transform component has ' 'been renamed", "engineering in training and serving. 
transform = Transform( examples=example_gen.outputs['examples'], schema=infer_schema.outputs['schema'],", "preprocess both 'train' and 'eval' splits of input examples, generate", "or implied. # See the License for the specific language", "Rights Reserved. # # Licensed under the Apache License, Version", "Channel of type `standard_artifacts.Schema`. This should contain a single schema", "found in the [user-supplied code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)) of the TFX Chicago Taxi", "transform the input examples. The Transform component wraps TensorFlow Transform", "should contain the two splits 'train' and 'eval'. schema: A", "which includes an exported Tensorflow graph suitable for both training", "'train' and 'eval' splits. input_data: Backwards compatibility alias for the", "examples: A Channel of type `standard_artifacts.Examples` (required). This should contain", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "None, transformed_examples: Optional[types.Channel] = None, input_data: Optional[types.Channel] = None, instance_name:", "feature engineering in training and serving. transform = Transform( examples=example_gen.outputs['examples'],", "tfx.types.standard_component_specs import TransformSpec class Transform(base_component.BaseComponent): \"\"\"A TFX component to transform", "the following signature. def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:", "specified, defaults to the value specified for pipeline's enable_cache parameter.", "= None, enable_cache: Optional[bool] = None): \"\"\"Construct a Transform component.", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "TransformSpec EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor) def __init__( self, examples: types.Channel =", "'preprocessing_fn' must be supplied.\" ) transform_graph = transform_graph or types.Channel(", "specified for pipeline's enable_cache parameter. Raises: ValueError: When both or", "suitable for both training and serving; transformed_examples: Optional output 'ExamplesPath'", "the `preprocessing_fn()` function within that file. An example of `preprocessing_fn()`", "contain a single schema artifact. module_file: The file path to", "(the \"License\"); # you may not use this file except", "'train' and 'eval'. schema: A Channel of type `standard_artifacts.Schema`. This", "# you may not use this file except in compliance", "serving; transformed_examples: Optional output 'ExamplesPath' channel for materialized transformed examples,", "tfx.types import artifact from tfx.types import artifact_utils from tfx.types import", "'ExamplesPath' channel for materialized transformed examples, which includes both 'train'", "the value specified for pipeline's enable_cache parameter. Raises: ValueError: When", "output of 'tf.Transform', which includes an exported Tensorflow graph suitable", "contain the two splits 'train' and 'eval'. 
schema: A Channel", "Optional[Union[Text, data_types.RuntimeParameter]] = None, transform_graph: Optional[types.Channel] = None, transformed_examples: Optional[types.Channel]", "spec = TransformSpec( examples=examples, schema=schema, module_file=module_file, preprocessing_fn=preprocessing_fn, transform_graph=transform_graph, transformed_examples=transformed_examples) super(Transform,", "# # Unless required by applicable law or agreed to", "both 'train' and 'eval' splits of input examples, generate the", "schema=infer_schema.outputs['schema'], module_file=module_file) ``` Please see https://www.tensorflow.org/tfx/transform for more details. \"\"\"", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]: ... where the", "value specified for pipeline's enable_cache parameter. Raises: ValueError: When both", "in a TFX pipeline. This component will load the preprocessing_fn", "or tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn' must be", "for materialized transformed examples, which includes both 'train' and 'eval'", "Version 2.0 (the \"License\"); # you may not use this", "Providing a preprocessing function The TFX executor will use the", "type `standard_artifacts.Schema`. This should contain a single schema artifact. module_file:", "(required). This should contain the two splits 'train' and 'eval'.", "and 'eval' splits. input_data: Backwards compatibility alias for the 'examples'", "transformed examples, which includes both 'train' and 'eval' splits. input_data:", "to python function that implements a 'preprocessing_fn'. See 'module_file' for", "= TransformSpec EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor) def __init__( self, examples: types.Channel", "import absl from tfx import types from tfx.components.base import base_component", "None, schema: types.Channel = None, module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None,", "transformed_examples: example_artifact = standard_artifacts.Examples() example_artifact.split_names = artifact_utils.encode_split_names( artifact.DEFAULT_EXAMPLE_SPLITS) transformed_examples =", "raise ValueError( \"Exactly one of 'module_file' or 'preprocessing_fn' must be", "Google LLC. All Rights Reserved. # # Licensed under the", "instance_name: Optional[Text] = None, enable_cache: Optional[bool] = None): \"\"\"Construct a", "Raises: ValueError: When both or neither of 'module_file' and 'preprocessing_fn'", "from tfx.types import artifact_utils from tfx.types import standard_artifacts from tfx.types.standard_component_specs", "'TransformPath' channel for output of 'tf.Transform', which includes an exported", "of the function. Exactly one of 'module_file' or 'preprocessing_fn' must", "to train the model. The Transform executor will look specifically", "transform_graph = transform_graph or types.Channel( type=standard_artifacts.TransformGraph, artifacts=[standard_artifacts.TransformGraph()]) if not transformed_examples:", "__future__ import absolute_import from __future__ import division from __future__ import", "tfx.components.base import base_component from tfx.components.base import executor_spec from tfx.components.transform import", "under the License. \"\"\"TFX Transform component definition.\"\"\" from __future__ import", "'eval'. schema: A Channel of type `standard_artifacts.Schema`. This should contain", "python function that implements a 'preprocessing_fn'. See 'module_file' for expected", "implied. 
# See the License for the specific language governing", "TensorFlow Transform (tf.Transform) to preprocess data in a TFX pipeline.", "under the Apache License, Version 2.0 (the \"License\"); # you", "for pipeline's enable_cache parameter. Raises: ValueError: When both or neither", "examples, generate the `tf.Transform` output, and save both transform function", "definition.\"\"\" from __future__ import absolute_import from __future__ import division from", "from __future__ import print_function from typing import Optional, Text, Union", "Transform component wraps TensorFlow Transform (tf.Transform) to preprocess data in", "function. Exactly one of 'module_file' or 'preprocessing_fn' must be supplied.", "from tfx.orchestration import data_types from tfx.types import artifact from tfx.types", "module file, from which the 'preprocessing_fn' function will be loaded.", "`preprocessing_fn()` function within that file. An example of `preprocessing_fn()` can", "code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)) of the TFX Chicago Taxi pipeline example. ## Example", "unique instance name. Necessary iff multiple transform components are declared", "by applicable law or agreed to in writing, software #", "self, examples: types.Channel = None, schema: types.Channel = None, module_file:", "Please see https://www.tensorflow.org/tfx/transform for more details. \"\"\" SPEC_CLASS = TransformSpec", "2019 Google LLC. All Rights Reserved. # # Licensed under", "types.Channel = None, schema: types.Channel = None, module_file: Optional[Union[Text, data_types.RuntimeParameter]]", "= standard_artifacts.Examples() example_artifact.split_names = artifact_utils.encode_split_names( artifact.DEFAULT_EXAMPLE_SPLITS) transformed_examples = types.Channel( type=standard_artifacts.Examples,", "file, preprocess both 'train' and 'eval' splits of input examples,", "a Transform component. Args: examples: A Channel of type `standard_artifacts.Examples`", "Union import absl from tfx import types from tfx.components.base import", "of type `standard_artifacts.Examples` (required). This should contain the two splits", "examples. The Transform component wraps TensorFlow Transform (tf.Transform) to preprocess", "enabled for the Transform component. If not specified, defaults to", "which the 'preprocessing_fn' function will be loaded. The function must", "enable_cache: Optional[bool] = None): \"\"\"Construct a Transform component. Args: examples:", "examples, which includes both 'train' and 'eval' splits. input_data: Backwards", "Transform component definition.\"\"\" from __future__ import absolute_import from __future__ import", "to the Transform component has ' 'been renamed to \"examples\"", "The function must have the following signature. def preprocessing_fn(inputs: Dict[Text,", "for the Transform component. If not specified, defaults to the", "a preprocessing function The TFX executor will use the estimator", "as: python2, python3 # Copyright 2019 Google LLC. All Rights", "'preprocessing_fn'. See 'module_file' for expected signature of the function. Exactly", "to transform the input examples. The Transform component wraps TensorFlow", "the `tf.Transform` output, and save both transform function and transformed", "neither of 'module_file' and 'preprocessing_fn' is supplied. \"\"\" if input_data:", "specifically for the `preprocessing_fn()` function within that file. 
An example", "'eval' splits of input examples, generate the `tf.Transform` output, and", "The TFX executor will use the estimator provided in the", "Example ``` # Performs transformations and feature engineering in training", "A Channel of type `standard_artifacts.Examples` (required). This should contain the", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "TransformSpec class Transform(base_component.BaseComponent): \"\"\"A TFX component to transform the input", "Unless required by applicable law or agreed to in writing,", "component. If not specified, defaults to the value specified for", "will look specifically for the `preprocessing_fn()` function within that file.", "and serving; transformed_examples: Optional output 'ExamplesPath' channel for materialized transformed", "artifact. module_file: The file path to a python module file,", "## Example ``` # Performs transformations and feature engineering in", "file path to a python module file, from which the", "file, from which the 'preprocessing_fn' function will be loaded. The", "instance name. Necessary iff multiple transform components are declared in", "enable_cache: Optional boolean to indicate if cache is enabled for", "import executor from tfx.orchestration import data_types from tfx.types import artifact", "the specific language governing permissions and # limitations under the", "TFX Chicago Taxi pipeline example. ## Example ``` # Performs", "supplied. \"\"\" if input_data: absl.logging.warning( 'The \"input_data\" argument to the", "which includes both 'train' and 'eval' splits. input_data: Backwards compatibility", "applicable law or agreed to in writing, software # distributed", "example of `preprocessing_fn()` can be found in the [user-supplied code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py))", "absl from tfx import types from tfx.components.base import base_component from", "enable_cache parameter. Raises: ValueError: When both or neither of 'module_file'", "train the model. The Transform executor will look specifically for", "executor_spec from tfx.components.transform import executor from tfx.orchestration import data_types from", "for output of 'tf.Transform', which includes an exported Tensorflow graph", "'usage as support for this argument will be removed soon.')", "When both or neither of 'module_file' and 'preprocessing_fn' is supplied.", "None, transform_graph: Optional[types.Channel] = None, transformed_examples: Optional[types.Channel] = None, input_data:", "= executor_spec.ExecutorClassSpec(executor.Executor) def __init__( self, examples: types.Channel = None, schema:", "one of 'module_file' or 'preprocessing_fn' must be supplied. preprocessing_fn: The", "the [user-supplied code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)) of the TFX Chicago Taxi pipeline example.", "types.Channel = None, module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None, preprocessing_fn: Optional[Union[Text,", "import types from tfx.components.base import base_component from tfx.components.base import executor_spec", "'preprocessing_fn' function will be loaded. 
The function must have the", "soon.') examples = input_data if bool(module_file) == bool(preprocessing_fn): raise ValueError(", "in writing, software # distributed under the License is distributed", "save both transform function and transformed examples to orchestrator desired", "= None, schema: types.Channel = None, module_file: Optional[Union[Text, data_types.RuntimeParameter]] =", "data_types.RuntimeParameter]] = None, transform_graph: Optional[types.Channel] = None, transformed_examples: Optional[types.Channel] =", "for this argument will be removed soon.') examples = input_data", "splits 'train' and 'eval'. schema: A Channel of type `standard_artifacts.Schema`.", "component has ' 'been renamed to \"examples\" and is deprecated.", "type `standard_artifacts.Examples` (required). This should contain the two splits 'train'", "signature. def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]: ... where", "transformations and feature engineering in training and serving. transform =", "import print_function from typing import Optional, Text, Union import absl", "examples = input_data if bool(module_file) == bool(preprocessing_fn): raise ValueError( \"Exactly", "file to train the model. The Transform executor will look", "for both training and serving; transformed_examples: Optional output 'ExamplesPath' channel", "be removed soon.') examples = input_data if bool(module_file) == bool(preprocessing_fn):", "'preprocessing_fn' must be supplied. preprocessing_fn: The path to python function", "be found in the [user-supplied code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)) of the TFX Chicago", "deprecated. Please update your ' 'usage as support for this", "of 'module_file' or 'preprocessing_fn' must be supplied. transform_graph: Optional output", "'module_file' for expected signature of the function. Exactly one of", "(tf.Transform) to preprocess data in a TFX pipeline. This component", "from tfx import types from tfx.components.base import base_component from tfx.components.base", "if bool(module_file) == bool(preprocessing_fn): raise ValueError( \"Exactly one of 'module_file'", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None, preprocessing_fn: Optional[Union[Text, data_types.RuntimeParameter]] = None,", "License, Version 2.0 (the \"License\"); # you may not use", "None): \"\"\"Construct a Transform component. Args: examples: A Channel of", "input and returned Dict are either tf.Tensor or tf.SparseTensor. Exactly", "# You may obtain a copy of the License at", "Optional output 'ExamplesPath' channel for materialized transformed examples, which includes", "Transform (tf.Transform) to preprocess data in a TFX pipeline. This", "preprocessing function The TFX executor will use the estimator provided", "from tfx.types import artifact from tfx.types import artifact_utils from tfx.types", "\"examples\" and is deprecated. Please update your ' 'usage as", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "from tfx.components.base import base_component from tfx.components.base import executor_spec from tfx.components.transform", "of 'module_file' or 'preprocessing_fn' must be supplied. 
preprocessing_fn: The path", "from tfx.components.transform import executor from tfx.orchestration import data_types from tfx.types", "ValueError: When both or neither of 'module_file' and 'preprocessing_fn' is", "function and transformed examples to orchestrator desired locations. ## Providing", "Dict[Text, Any]: ... where the values of input and returned", "= input_data if bool(module_file) == bool(preprocessing_fn): raise ValueError( \"Exactly one", "Optional output 'TransformPath' channel for output of 'tf.Transform', which includes", "input_data: Backwards compatibility alias for the 'examples' argument. instance_name: Optional", "The path to python function that implements a 'preprocessing_fn'. See", "-> Dict[Text, Any]: ... where the values of input and", "both transform function and transformed examples to orchestrator desired locations.", "Any]) -> Dict[Text, Any]: ... where the values of input", "= None, transformed_examples: Optional[types.Channel] = None, input_data: Optional[types.Channel] = None,", "the License for the specific language governing permissions and #", "def __init__( self, examples: types.Channel = None, schema: types.Channel =", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. # See the License for the", "of `preprocessing_fn()` can be found in the [user-supplied code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)) of", "= types.Channel( type=standard_artifacts.Examples, artifacts=[example_artifact]) spec = TransformSpec( examples=examples, schema=schema, module_file=module_file,", "Optional[types.Channel] = None, instance_name: Optional[Text] = None, enable_cache: Optional[bool] =", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "and 'eval' splits of input examples, generate the `tf.Transform` output,", "`standard_artifacts.Examples` (required). This should contain the two splits 'train' and", "in the `module_file` file to train the model. The Transform", "see https://www.tensorflow.org/tfx/transform for more details. \"\"\" SPEC_CLASS = TransformSpec EXECUTOR_SPEC", "transform_graph or types.Channel( type=standard_artifacts.TransformGraph, artifacts=[standard_artifacts.TransformGraph()]) if not transformed_examples: example_artifact =", "pipeline example. ## Example ``` # Performs transformations and feature", "typing import Optional, Text, Union import absl from tfx import", "from tfx.types import standard_artifacts from tfx.types.standard_component_specs import TransformSpec class Transform(base_component.BaseComponent):", "# Performs transformations and feature engineering in training and serving.", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the TFX Chicago Taxi pipeline example. ## Example ``` #", "of input examples, generate the `tf.Transform` output, and save both", "= TransformSpec( examples=examples, schema=schema, module_file=module_file, preprocessing_fn=preprocessing_fn, transform_graph=transform_graph, transformed_examples=transformed_examples) super(Transform, self).__init__(", "for the `preprocessing_fn()` function within that file. An example of", "Optional[Union[Text, data_types.RuntimeParameter]] = None, preprocessing_fn: Optional[Union[Text, data_types.RuntimeParameter]] = None, transform_graph:", "and feature engineering in training and serving. 
transform = Transform(", "print_function from typing import Optional, Text, Union import absl from", "artifacts=[standard_artifacts.TransformGraph()]) if not transformed_examples: example_artifact = standard_artifacts.Examples() example_artifact.split_names = artifact_utils.encode_split_names(", "This should contain the two splits 'train' and 'eval'. schema:", "instance_name: Optional unique instance name. Necessary iff multiple transform components", "module_file=module_file) ``` Please see https://www.tensorflow.org/tfx/transform for more details. \"\"\" SPEC_CLASS", "\"License\"); # you may not use this file except in", "= transform_graph or types.Channel( type=standard_artifacts.TransformGraph, artifacts=[standard_artifacts.TransformGraph()]) if not transformed_examples: example_artifact", "Channel of type `standard_artifacts.Examples` (required). This should contain the two", "in the same pipeline. enable_cache: Optional boolean to indicate if", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "standard_artifacts from tfx.types.standard_component_specs import TransformSpec class Transform(base_component.BaseComponent): \"\"\"A TFX component", "and save both transform function and transformed examples to orchestrator", "module_file: The file path to a python module file, from", "includes an exported Tensorflow graph suitable for both training and", "Please update your ' 'usage as support for this argument", "# distributed under the License is distributed on an \"AS", "<filename>tfx/components/transform/component.py # Lint as: python2, python3 # Copyright 2019 Google", "type=standard_artifacts.TransformGraph, artifacts=[standard_artifacts.TransformGraph()]) if not transformed_examples: example_artifact = standard_artifacts.Examples() example_artifact.split_names =", "# Unless required by applicable law or agreed to in", "or types.Channel( type=standard_artifacts.TransformGraph, artifacts=[standard_artifacts.TransformGraph()]) if not transformed_examples: example_artifact = standard_artifacts.Examples()", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "tfx import types from tfx.components.base import base_component from tfx.components.base import", "and # limitations under the License. \"\"\"TFX Transform component definition.\"\"\"", "tfx.types import artifact_utils from tfx.types import standard_artifacts from tfx.types.standard_component_specs import", "and transformed examples to orchestrator desired locations. ## Providing a", "training and serving; transformed_examples: Optional output 'ExamplesPath' channel for materialized", "You may obtain a copy of the License at #", "__future__ import division from __future__ import print_function from typing import", "output, and save both transform function and transformed examples to", "function The TFX executor will use the estimator provided in", "SPEC_CLASS = TransformSpec EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor) def __init__( self, examples:", "Transform component has ' 'been renamed to \"examples\" and is", "transformed examples to orchestrator desired locations. ## Providing a preprocessing", "component definition.\"\"\" from __future__ import absolute_import from __future__ import division", "# Copyright 2019 Google LLC. All Rights Reserved. # #", "See 'module_file' for expected signature of the function. 
Exactly one", "Optional[types.Channel] = None, input_data: Optional[types.Channel] = None, instance_name: Optional[Text] =", "the Apache License, Version 2.0 (the \"License\"); # you may", "component wraps TensorFlow Transform (tf.Transform) to preprocess data in a", "to orchestrator desired locations. ## Providing a preprocessing function The", "module file, preprocess both 'train' and 'eval' splits of input", "function must have the following signature. def preprocessing_fn(inputs: Dict[Text, Any])" ]
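# --- Illustrative sketch (not part of the original file above): a minimal
# user-supplied module_file exposing the preprocessing_fn() signature that the
# Transform docstring describes. The feature names ('x', 'x_scaled') are
# hypothetical; only the signature and the Dict-of-tensors-in /
# Dict-of-tensors-out contract come from the docstring.

from typing import Any, Dict, Text

import tensorflow as tf


def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:
  """Maps raw feature tensors to transformed feature tensors."""
  outputs = {}
  # Cast a hypothetical numeric feature 'x' to float32 and emit it under a
  # new key; a real module would typically use tf.Transform analyzers here.
  outputs['x_scaled'] = tf.cast(inputs['x'], tf.float32)
  return outputs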
# https://github.com/git/git/blob/master/Documentation/technical/index-format.txt
class GitIndexEntry(object):
    # The last time a file's metadata changed. This is a tuple (seconds, nanoseconds)
    ctime = None
    # The last time a file's data changed. This is a tuple (seconds, nanoseconds)
    mtime = None
    # The ID of the device containing this file
    dev = None
    # The file's inode number
    ino = None
    # The object type, either b1000 (regular), b1010 (symlink), b1110 (gitlink)
    mode_type = None
    # The object permissions as an integer
    mode_permissions = None
    # User ID of owner
    uui = None
    # Group ID of owner
    gid = None
    # Size of this object in bytes
    size = None
    # The object's hash as a hex string
    object = None
    flag_assume_valid = None
    flag_extended = None
    flag_stage = None
    # Length of the name if < 0xFFF, -1 otherwise
    flag_name_length = None
    name = None
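The class above is only a container of class-level attributes. As a minimal sketch of how an entry might be populated from the filesystem (the entry_from_stat helper below is an illustration, not part of the original snippet):

import os

def entry_from_stat(path):
    """Fill a GitIndexEntry from os.stat() metadata (illustrative sketch only)."""
    st = os.stat(path)
    entry = GitIndexEntry()
    entry.ctime = (int(st.st_ctime), st.st_ctime_ns % 10**9)
    entry.mtime = (int(st.st_mtime), st.st_mtime_ns % 10**9)
    entry.dev = st.st_dev
    entry.ino = st.st_ino
    entry.mode_type = 0b1000                      # regular file
    entry.mode_permissions = st.st_mode & 0o777
    entry.uui = st.st_uid
    entry.gid = st.st_gid
    entry.size = st.st_size
    entry.name = path
    return entry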
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 13 14:47:13 2021

@author: huzongxiang
"""

import tensorflow as tf
from tensorflow.keras import layers


class PartitionPadding(layers.Layer):
    def __init__(self, batch_size, **kwargs):
        super().__init__(**kwargs)
        self.batch_size = batch_size

    def call(self, inputs):
        features, graph_indices = inputs

        # Obtain subgraphs
        features = tf.dynamic_partition(
            features, graph_indices, self.batch_size
        )

        # Pad and stack subgraphs
        num_features = [tf.shape(f)[0] for f in features]
        max_num = tf.reduce_max(num_features)
        features_padded = tf.stack(
            [
                tf.pad(f, [(0, max_num - n), (0, 0)])
                for f, n in zip(features, num_features)
            ],
            axis=0,
        )

        # Remove empty subgraphs (usually for last batch)
        nonempty_examples = tf.where(tf.reduce_sum(features_padded, (1, 2)) != 0)
        nonempty_examples = tf.squeeze(nonempty_examples, axis=-1)

        features_batch = tf.gather(features_padded, nonempty_examples, axis=0)

        return features_batch

    def get_config(self):
        config = super().get_config()
        config.update({"batch": self.batch_size})
        return config


class PartitionPaddingPair(layers.Layer):
    def __init__(self, batch_size, **kwargs):
        super().__init__(**kwargs)
        self.batch_size = batch_size

    def call(self, inputs):
        features, graph_indices = inputs

        # Obtain subgraphs
        features = tf.dynamic_partition(
            features, graph_indices, self.batch_size
        )

        # Pad and stack subgraphs
        num_features = [tf.shape(f)[0] for f in features]
        max_num = tf.reduce_max(num_features)
        features_padded = tf.stack(
            [
                tf.pad(f, [(0, max_num - n), (0, 0)])
                for f, n in zip(features, num_features)
            ],
            axis=0,
        )

        # Remove empty subgraphs (usually for last batch)
        nonempty_examples = tf.unique(graph_indices)[0]

        features_batch = tf.gather(features_padded, nonempty_examples, axis=0)

        return features_batch

    def get_config(self):
        config = super().get_config()
        config.update({"batch_size": self.batch_size})
        return config
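Both layers turn a flat node-feature matrix plus per-node graph indices into a dense, padded batch. A quick sanity check (all shapes and values below are made up for illustration):

import tensorflow as tf

# Batch of 2 graphs: graph 0 has 3 nodes, graph 1 has 2 nodes, 8 features per node.
node_features = tf.random.normal((5, 8))
graph_indices = tf.constant([0, 0, 0, 1, 1])

layer = PartitionPadding(batch_size=2)
batched = layer([node_features, graph_indices])
print(batched.shape)  # (2, 3, 8): every graph padded to the size of the largest one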
from .demo import *

SITE.verbose_name = SITE.verbose_name + " (:memory:)"
# SITE = Site(globals(), title=Site.title+" (:memory:)")
DATABASES['default']['NAME'] = ':memory:'
from django.urls import path

from . import views

app_name = 'reservation'

urlpatterns = [
    path('', views.reserve_table, name='reserve_table'),
]
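For this route to resolve, the project urlconf has to include the app and views.reserve_table has to exist. A minimal, hypothetical pairing (the URL prefix and template name are assumptions, not taken from the original project):

# project urls.py (sketch)
from django.urls import include, path

urlpatterns = [
    path('reservation/', include('reservation.urls')),
]

# reservation/views.py (sketch)
from django.shortcuts import render

def reserve_table(request):
    # The real view body is not shown in the original; this just renders a placeholder template.
    return render(request, 'reservation/reserve_table.html')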
__version__ = '7.8.0'


_optional_dependencies = [
    {
        'name': 'CuPy',
        'packages': [
            'cupy-cuda120',
            'cupy-cuda114',
            'cupy-cuda113',
            'cupy-cuda112',
            'cupy-cuda111',
            'cupy-cuda110',
            'cupy-cuda102',
            'cupy-cuda101',
            'cupy-cuda100',
            'cupy-cuda92',
            'cupy-cuda91',
            'cupy-cuda90',
            'cupy-cuda80',
            'cupy',
        ],
        'specifier': '>=7.7.0,<8.0.0',
        'help': 'https://docs.cupy.dev/en/latest/install.html',
    },
    {
        'name': 'iDeep',
        'packages': [
            'ideep4py',
        ],
        'specifier': '>=2.0.0.post3, <2.1',
        'help': 'https://docs.chainer.org/en/latest/tips.html',
    },
]
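The table is plain data; a sketch of how it could be consumed to warn about installed-but-incompatible optional packages (an illustration only, not Chainer's actual check) might be:

from importlib.metadata import PackageNotFoundError, version
from packaging.specifiers import SpecifierSet

def check_optional_dependencies(deps=_optional_dependencies):
    for dep in deps:
        spec = SpecifierSet(dep['specifier'])
        for pkg in dep['packages']:
            try:
                installed = version(pkg)
            except PackageNotFoundError:
                continue  # not installed, nothing to verify
            if installed not in spec:
                print('{} {} does not satisfy "{}" (see {})'.format(
                    pkg, installed, dep['specifier'], dep['help']))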
# image_aug.py
# coding=UTF-8
# This Python file uses the following encoding: utf-8
import cv2
import numpy as np
import xml.etree.cElementTree as ET
from random import sample

# default args:
default_args = {'noise_prob': 0.1,
                'gasuss_mean': 0,
                'gasuss_var': 0.001,
                'rand_hug': 30,
                'rand_saturation': 30,
                'rand_light': 30,
                'rot_angle': 15,
                'bordervalue': (127, 127, 127),
                'zoom_out_value': 0.7,
                'output_shape': (416, 416),
                'take_value': 5
                }


# Add black (pepper) noise
def sp_noise(image, box_loc=None, **kwargs):
    h, w = image.shape[0:2]
    noise = np.random.rand(h, w)
    out_img = image.copy()
    out_img[noise < kwargs['noise_prob']] = 0
    if box_loc is None:
        return out_img
    else:
        return out_img, box_loc


# Gaussian noise
def gasuss_noise(image, box_loc=None, **kwargs):
    out_img = (image / 255.) - 0.5
    noise = np.random.normal(kwargs['gasuss_mean'], kwargs['gasuss_var'] ** 0.5, image.shape)
    out_img = out_img + noise + 0.5
    out_img[out_img < 0] = 0
    out_img[out_img > 1] = 1
    out_img = (out_img * 255).astype(np.uint8)
    if box_loc is None:
        return out_img
    else:
        return out_img, box_loc


# Adjust hue (add a random value in -N~N to the hue channel)
def mod_hue(image, box_loc=None, **kwargs):
    out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
    out_img[:, :, 0] += np.random.randint(-kwargs['rand_hug'], kwargs['rand_hug'])
    out_img = cv2.cvtColor(np.clip(out_img, 0, 180).astype(np.uint8), cv2.COLOR_HSV2BGR)
    if box_loc is None:
        return out_img
    else:
        return out_img, box_loc


# Adjust saturation (add a random value in -N~N to the saturation channel)
def mod_saturation(image, box_loc=None, **kwargs):
    out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
    out_img[:, :, 1] += np.random.randint(-kwargs['rand_saturation'], kwargs['rand_saturation'])
    out_img = cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR)
    if box_loc is None:
        return out_img
    else:
        return out_img, box_loc


# Adjust brightness (add a random value in -N~N to the value channel)
def mod_light(image, box_loc=None, **kwargs):
    out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
    out_img[:, :, 2] += np.random.randint(-kwargs['rand_light'], kwargs['rand_light'])
    out_img = cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR)
    if box_loc is None:
        return out_img
    else:
        return out_img, box_loc


# Horizontal flip
def horizontal_flip(image, box_loc=None, **kwargs):
    '''
    Args:
        box_loc: bounding box location(x_min, y_min, x_max, y_max)
    '''
    if box_loc is None:
        return cv2.flip(image, 1)
    else:
        w = image.shape[1]
        for i in box_loc:
            if i[2] == 0:
                break
            else:
                x_min, x_max = i[0], i[2]
                i[0] = w - x_max
                i[2] = w - x_min
        return cv2.flip(image, 1), box_loc


# Vertical flip
def vertical_flip(image, box_loc=None, **kwargs):
    '''
    Args:
        box_loc: bounding box location(num box,(x_min, y_min, x_max, y_max, label))
    '''
    if box_loc is None:
        return cv2.flip(image, 0)
    else:
        h = image.shape[0]
        for i in box_loc:
            if i[3] == 0:
                break
            else:
                y_min, y_max = i[1], i[3]
                i[1] = h - y_max
                i[3] = h - y_min
        return cv2.flip(image, 0), box_loc


# Rotate by a random angle in -n~n degrees
def rot_image(image, box_loc=None, **kwargs):
    '''
    Args:
        box_loc: bounding box location(num box,(x_min, y_min, x_max, y_max, label))
        rot: range of rotation angles
        bordervalue: fill value for the empty border
    '''
    h, w, _ = image.shape
    center = (w // 2, h // 2)
    angle = np.random.randint(-kwargs['rot_angle'], kwargs['rot_angle'])
    M = cv2.getRotationMatrix2D(center, angle, 1)
    out_img = cv2.warpAffine(image, M, (w, h), borderValue=kwargs['bordervalue'])
    if box_loc is None:
        return out_img
    else:
        loc = box_loc[:, 0:4].copy()
        loc = np.append(loc, loc[:, 0:1], axis=-1)
        loc = np.append(loc, loc[:, 3:4], axis=-1)
        loc = np.append(loc, loc[:, 2:3], axis=-1)
        loc = np.append(loc, loc[:, 1:2], axis=-1)
        loc = loc.reshape(-1, 4, 2)
        loc = loc - np.array(center)
        rot_loc = loc.dot(np.transpose(M[:, 0:2]))
        rot_loc = rot_loc + np.array(center)
        rot_box = np.hstack([np.min(rot_loc, axis=-2), np.max(rot_loc, axis=-2), box_loc[:, 4:5]])
        rot_box = np.floor(rot_box)
        rot_box[..., 0:4] = np.clip(rot_box[..., 0:4], [0, 0, 0, 0], [w - 1, h - 1, w - 1, h - 1])
        return out_img, rot_box


# Resize the image while keeping the aspect ratio
def resize_img(image, box_loc=None, **kwargs):
    h, w, _ = image.shape
    max_edge = max(kwargs['output_shape'][0], kwargs['output_shape'][1])
    scale = min(max_edge / h, max_edge / w)
    h = int(h * scale)
    w = int(w * scale)
    if box_loc is None:
        return cv2.resize(image, (w, h))
    else:
        box_loc[:, 0] = box_loc[:, 0] * scale
        box_loc[:, 1] = box_loc[:, 1] * scale
        box_loc[:, 2] = box_loc[:, 2] * scale
        box_loc[:, 3] = box_loc[:, 3] * scale
        return cv2.resize(image, (w, h)), box_loc.astype(np.int32)


# Pad the image to the target size
def padding_img(image, box_loc=None, **kwargs):
    h, w, _ = image.shape
    dx = int((kwargs['output_shape'][1] - w) / 2)
    dy = int((kwargs['output_shape'][0] - h) / 2)
    out_img = np.ones((kwargs['output_shape'][0], kwargs['output_shape'][1], 3), np.uint8) * kwargs['bordervalue'][0]
    out_img[dy: dy + h, dx: dx + w] = cv2.resize(image, (w, h))

    if box_loc is None:
        return out_img
    else:
        box_loc[:, 0] = box_loc[:, 0] + dx
        box_loc[:, 1] = box_loc[:, 1] + dy
        box_loc[:, 2] = box_loc[:, 2] + dx
        box_loc[:, 3] = box_loc[:, 3] + dy
        return out_img, box_loc.astype(np.int32)


# Randomly shrink the image by a factor between value and 1
def random_zoom_out(image, box_loc=None, **kwargs):
    h, w, _ = image.shape
    scale = np.random.uniform(kwargs['zoom_out_value'], 1)
    h = int(h * scale)
    w = int(w * scale)
    dx = int((image.shape[1] - w) / 2)
    dy = int((image.shape[0] - h) / 2)
    out_img = np.ones(image.shape, np.uint8) * kwargs['bordervalue'][0]
    out_img[dy: dy + h, dx: dx + w] = cv2.resize(image, (w, h))
    if box_loc is None:
        return out_img
    else:
        box_loc[:, 0] = box_loc[:, 0] * scale + dx
        box_loc[:, 1] = box_loc[:, 1] * scale + dy
        box_loc[:, 2] = box_loc[:, 2] * scale + dx
        box_loc[:, 3] = box_loc[:, 3] * scale + dy
        return out_img, box_loc.astype(np.int32)


# Load bounding-box annotations from a VOC-style XML file
def load_csv(xml_path, max_boxes=4):
    tree = ET.parse(xml_path)
    root = tree.getroot()
    # location list
    loc_list = np.zeros((0, 5))
    box_count = 0

    for obj in root.iter('object'):
        if box_count >= max_boxes:
            break
        '''
        difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)
        '''
        loc = obj.find('bndbox')
        x_min = int(loc.find('xmin').text)
        y_min = int(loc.find('ymin').text)
        x_max = int(loc.find('xmax').text)
        y_max = int(loc.find('ymax').text)
        loc_list = np.vstack([loc_list, np.array([x_min, y_min, x_max, y_max, 0])])
        box_count += 1

    return loc_list.astype(np.float32)


# draw rectangle
def draw_rect(image, box_loc):
    for i in box_loc:
        cv2.rectangle(image, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])), (0, 255, 0), 4)


def print_args(**kwargs):
    for key, value in kwargs.items():
        print('key name: {}\nvalue:{}\n'.format(key, value))


# Randomly apply 0~N of the image augmentation methods
def rand_aug_image(image, box_loc=None, **kwargs):
    if box_loc is None:
        out_img = resize_img(image, **kwargs)
    else:
        out_img, box_loc = resize_img(image, box_loc, **kwargs)

    # total augmentation function
    func_list = [sp_noise, gasuss_noise, mod_hue, mod_saturation, mod_light,
                 horizontal_flip, vertical_flip, rot_image, random_zoom_out]
    # rand take function
    take_func = sample(func_list, np.random.randint(kwargs['take_value']))

    for func in take_func:
        if box_loc is None:
            out_img = func(out_img, **kwargs)
        else:
            out_img, box_loc = func(out_img, box_loc, **kwargs)

    if box_loc is None:
        out_img = padding_img(out_img, **kwargs)
        return out_img
    else:
        out_img, box_loc = padding_img(out_img, box_loc, **kwargs)
        return out_img, box_loc


if __name__ == "__main__":
    img = cv2.imread('./00002.jpg')
    bbox = load_csv('./00002.xml')

    # pepper noise
    # aug_img = sp_noise(img, **default_args)
    # aug_img, bbox = sp_noise(img, bbox, **default_args)

    # gasuss_noise
    # aug_img = gasuss_noise(img, **default_args)
    # aug_img, bbox = gasuss_noise(img, bbox, **default_args)

    # adjust Hue
    # aug_img = mod_hue(img, **default_args)
    # aug_img, bbox = mod_hue(img, bbox, **default_args)

    # adjust saturation
    # aug_img = mod_saturation(img, **default_args)
    # aug_img, bbox = mod_saturation(img, bbox, **default_args)

    # adjust light
    # aug_img = mod_light(img, **default_args)
    # aug_img, bbox = mod_light(img, bbox, **default_args)

    # horizontal flip
    # aug_img = horizontal_flip(img, **default_args)
    # aug_img, bbox = horizontal_flip(img, bbox, **default_args)

    # vertical flip
    # aug_img = vertical_flip(img, **default_args)
    # aug_img, bbox = vertical_flip(img, bbox, **default_args)

    # rotate
    # aug_img = rot_image(img, **default_args)
    # aug_img, bbox = rot_image(img, bbox, **default_args)

    # resize to the target size keeping aspect ratio
    # aug_img = resize_img(img, **default_args)
    # aug_img, bbox = resize_img(img, bbox, **default_args)

    # pad to the target size
    # aug_img = padding_img(aug_img, **default_args)
    # aug_img, bbox = padding_img(aug_img, bbox, **default_args)

    # randomly zoom out by N~1
    # aug_img = random_zoom_out(img, **default_args)
    # aug_img, bbox = random_zoom_out(img, bbox, **default_args)

    # randomly pick augmentation methods
    aug_img = rand_aug_image(img, **default_args)
    # aug_img, bbox = rand_aug_image(img, bbox, **default_args)

    print(bbox)
    draw_rect(aug_img, bbox)
    cv2.imshow('img', img)
    cv2.imshow('aug img', aug_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
**default_args) #旋轉角度 #aug_img =", "dx box_loc[:,1] = box_loc[:,1] + dy box_loc[:,2] = box_loc[:,2] +", "out_img, box_loc = padding_img(out_img, box_loc, **kwargs) return out_img, box_loc if", ": 5 } #添加黑色noise def sp_noise(image, box_loc=None, **kwargs): h, w", "= mod_saturation(img, **default_args) #aug_img, bbox = mod_saturation(img, bbox, **default_args) #調整light", "function take_func = sample(func_list, np.random.randint(kwargs['take_value'])) for func in take_func: if", "return cv2.resize(image, (w, h)), box_loc.astype(np.int32) #將樸片補至指定大小 def padding_img(image, box_loc=None, **kwargs):", "__name__ == \"__main__\": img = cv2.imread('./00002.jpg') bbox = load_csv('./00002.xml') #黑點noise", "is None: out_img = resize_img(image, **kwargs) else: out_img, box_loc =", "'take_value' : 5 } #添加黑色noise def sp_noise(image, box_loc=None, **kwargs): h,", "root = tree.getroot() #location list loc_list = np.zeros((0, 5)) box_count", "bbox, **default_args) #調整light #aug_img = mod_light(img, **default_args) #aug_img, bbox =", "Args: box_loc: bounding box location(x_min, y_min, x_max, y_max) ''' if", "= cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,2] += np.random.randint(-kwargs['rand_light'], kwargs['rand_light']) out_img = cv2.cvtColor(np.clip(out_img,", "y_min, y_max = i[1], i[3] i[1] = h - y_max", "take_func: if box_loc is None: out_img = func(out_img, **kwargs) else:", "np.random.uniform(kwargs['zoom_out_value'], 1) h = int(h * scale) w = int(w", "kwargs['gasuss_var']** 0.5, image.shape) out_img = out_img + noise + 0.5", "bbox, **default_args) #旋轉角度 #aug_img = rot_image(img, **default_args) #aug_img, bbox =", "box_loc[:,2] = box_loc[:,2] * scale box_loc[:,3] = box_loc[:,3] * scale", "np.random.randint(-kwargs['rot_angle'], kwargs['rot_angle']) M = cv2.getRotationMatrix2D(center, angle, 1) out_img = cv2.warpAffine(image,", "return cv2.resize(image, (w, h)) else: box_loc[:,0] = box_loc[:,0] * scale", "= [sp_noise, gasuss_noise, mod_hue, mod_saturation, mod_light, horizontal_flip, vertical_flip, rot_image, random_zoom_out]", "scale) dx = int((image.shape[1] - w) / 2) dy =", "box_loc=None, **kwargs): out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,2] += np.random.randint(-kwargs['rand_light'], kwargs['rand_light'])", "else: h = image.shape[0] for i in box_loc: if i[3]", "**default_args) #aug_img, bbox = random_zoom_out(img, bbox, **default_args) #隨機選擇augmentation方法 aug_img =", "= cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR) if box_loc is None: return", "{'noise_prob': 0.1, 'gasuss_mean': 0, 'gasuss_var': 0.001, 'rand_hug': 30, 'rand_saturation':30, 'rand_light':", "bbox = mod_hue(img, bbox, **default_args) #調整saturation #aug_img = mod_saturation(img, **default_args)", "ET from random import sample #default args: default_args = {'noise_prob':", "= resize_img(image, **kwargs) else: out_img, box_loc = resize_img(image, box_loc, **kwargs)", "return out_img else: loc = box_loc[:,0:4].copy() loc = np.append(loc, loc[:,", "file uses the following encoding: utf-8 import cv2 import numpy", "in box_loc: cv2.rectangle(image, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])), (0, 255, 0),", "np.array(center) rot_loc = loc.dot(np.transpose(M[:,0:2])) rot_loc = rot_loc + np.array(center) rot_box", "'gasuss_mean': 0, 'gasuss_var': 0.001, 'rand_hug': 30, 'rand_saturation':30, 'rand_light': 30, 'rot_angle':", "coding=UTF-8 # This Python file uses the following encoding: utf-8", "= (image / 255.) 
- 0.5 noise = np.random.normal(kwargs['gasuss_mean'], kwargs['gasuss_var']**", "= box_loc[:,3] + dy return out_img, box_loc.astype(np.int32) #隨機縮小 value~1倍 def", "= box_loc[:,2] * scale box_loc[:,3] = box_loc[:,3] * scale return", "random import sample #default args: default_args = {'noise_prob': 0.1, 'gasuss_mean':", "max_edge / w) h = int(h * scale) w =", "out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,0] += np.random.randint(-kwargs['rand_hug'], kwargs['rand_hug']) out_img =", "sp_noise(img, **default_args) #aug_img, bbox = sp_noise(img, bbox, **default_args) #gasuss_noise #aug_img", "print_args(**kwargs): for key, value in kwargs.items(): print('key name: {}\\nvalue:{}\\n'.format(key, value))", "#垂直翻轉 def vertical_flip(image, box_loc=None, **kwargs): ''' Args: box_loc: bounding box", "np.append(loc, loc[:, 0:1], axis=-1) loc = np.append(loc, loc[:, 3:4], axis=-1)", "is None: return out_img else: box_loc[:,0] = box_loc[:,0] * scale", "noise = np.random.normal(kwargs['gasuss_mean'], kwargs['gasuss_var']** 0.5, image.shape) out_img = out_img +", "kwargs['rand_saturation']) out_img = cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR) if box_loc is", "else: return out_img, box_loc #調整彩度(彩度通道加上隨機-N~N之值) def mod_hue(image, box_loc=None, **kwargs): out_img", "5 } #添加黑色noise def sp_noise(image, box_loc=None, **kwargs): h, w =", "random_zoom_out] #rand take function take_func = sample(func_list, np.random.randint(kwargs['take_value'])) for func", "bbox = gasuss_noise(img, bbox, **default_args) #調整Hue #aug_img = mod_hue(img, **default_args)", "x_min return cv2.flip(image, 1), box_loc #垂直翻轉 def vertical_flip(image, box_loc=None, **kwargs):", "if i[3] == 0: break else: y_min, y_max = i[1],", "* scale box_loc[:,1] = box_loc[:,1] * scale box_loc[:,2] = box_loc[:,2]", "scale box_loc[:,2] = box_loc[:,2] * scale box_loc[:,3] = box_loc[:,3] *", "uses the following encoding: utf-8 import cv2 import numpy as", "out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,2] += np.random.randint(-kwargs['rand_light'], kwargs['rand_light']) out_img =", "int(loc.find('xmax').text) y_max = int(loc.find('ymax').text) loc_list = np.vstack([loc_list, np.array([x_min, y_min, x_max,", "#添加黑色noise def sp_noise(image, box_loc=None, **kwargs): h, w = image.shape[0:2] noise", "+ dx box_loc[:,1] = box_loc[:,1] * scale + dy box_loc[:,2]", "else: out_img, box_loc = padding_img(out_img, box_loc, **kwargs) return out_img, box_loc", "= int(w * scale) dx = int((image.shape[1] - w) /", "kwargs['noise_prob']] = 0 if box_loc is None: return out_img else:", "np.array(center) rot_box = np.hstack([np.min(rot_loc, axis=-2), np.max(rot_loc, axis=-2), box_loc[:, 4:5]]) rot_box", "**default_args) print(bbox) draw_rect(aug_img, bbox) cv2.imshow('img', img) cv2.imshow('aug img', aug_img) cv2.waitKey(0)", "0.1, 'gasuss_mean': 0, 'gasuss_var': 0.001, 'rand_hug': 30, 'rand_saturation':30, 'rand_light': 30,", "else: return out_img, box_loc #調整飽和度(飽和度通道加上隨機-N~N之值) def mod_saturation(image, box_loc=None, **kwargs): out_img", "bbox, **default_args) #垂直翻轉 #aug_img = vertical_flip(img, **default_args) #aug_img, bbox =", "'rand_saturation':30, 'rand_light': 30, 'rot_angle': 15, 'bordervalue': (127, 127, 127), 'zoom_out_value':", "= np.ones(image.shape, np.uint8) * kwargs['bordervalue'][0] out_img[dy: dy + h, dx:", "loc = obj.find('bndbox') x_min = int(loc.find('xmin').text) y_min = int(loc.find('ymin').text) x_max", "**default_args) #旋轉角度 #aug_img = rot_image(img, 
**default_args) #aug_img, bbox = rot_image(img,", "sample #default args: default_args = {'noise_prob': 0.1, 'gasuss_mean': 0, 'gasuss_var':", "**kwargs): if box_loc is None: out_img = resize_img(image, **kwargs) else:", "scale + dy return out_img, box_loc.astype(np.int32) #load csv data def", "box_loc=None, **kwargs): h, w, _ = image.shape dx = int((kwargs['output_shape'][1]", "h // 2) angle = np.random.randint(-kwargs['rot_angle'], kwargs['rot_angle']) M = cv2.getRotationMatrix2D(center,", "_ = image.shape scale = np.random.uniform(kwargs['zoom_out_value'], 1) h = int(h", "#調整亮度(亮度通道加上隨機-N~N之值) def mod_light(image, box_loc=None, **kwargs): out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,2]", "import cv2 import numpy as np import xml.etree.cElementTree as ET", "scale + dx box_loc[:,1] = box_loc[:,1] * scale + dy", "y_max, label)) ''' if box_loc is None: return cv2.flip(image, 0)", "loc = box_loc[:,0:4].copy() loc = np.append(loc, loc[:, 0:1], axis=-1) loc", "255, 0), 4) def print_args(**kwargs): for key, value in kwargs.items():", "x_min = int(loc.find('xmin').text) y_min = int(loc.find('ymin').text) x_max = int(loc.find('xmax').text) y_max", "box_loc is None: out_img = func(out_img, **kwargs) else: out_img, box_loc", "\"__main__\": img = cv2.imread('./00002.jpg') bbox = load_csv('./00002.xml') #黑點noise #aug_img =", "(127, 127, 127), 'zoom_out_value': 0.7, 'output_shape': (416, 416), 'take_value' :", "/ 2) dy = int((image.shape[0] - h) / 2) out_img", "return out_img, box_loc.astype(np.int32) #隨機縮小 value~1倍 def random_zoom_out(image, box_loc=None, **kwargs): h,", "noise + 0.5 out_img[out_img < 0] = 0 out_img[out_img >", "bbox = mod_light(img, bbox, **default_args) #水平翻轉 #aug_img = horizontal_flip(img, **default_args)", "_ = image.shape max_edge = max(kwargs['output_shape'][0], kwargs['output_shape'][1]) scale = min(", "box_loc is None: return out_img else: return out_img, box_loc #調整飽和度(飽和度通道加上隨機-N~N之值)", "15, 'bordervalue': (127, 127, 127), 'zoom_out_value': 0.7, 'output_shape': (416, 416),", "dx = int((kwargs['output_shape'][1] - w) / 2) dy = int((kwargs['output_shape'][0]", "mod_hue, mod_saturation, mod_light, horizontal_flip, vertical_flip, rot_image, random_zoom_out] #rand take function", "= tree.getroot() #location list loc_list = np.zeros((0, 5)) box_count =", "int(h * scale) w = int(w * scale) if box_loc", "= obj.find('bndbox') x_min = int(loc.find('xmin').text) y_min = int(loc.find('ymin').text) x_max =", "#調整彩度(彩度通道加上隨機-N~N之值) def mod_hue(image, box_loc=None, **kwargs): out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,0]", "return cv2.flip(image, 0) else: h = image.shape[0] for i in", "angle, 1) out_img = cv2.warpAffine(image, M, (w, h), borderValue =", "2:3], axis=-1) loc = np.append(loc, loc[:, 1:2], axis=-1) loc =", "* scale box_loc[:,3] = box_loc[:,3] * scale return cv2.resize(image, (w,", "w = image.shape[1] for i in box_loc: if i[2] ==", "None: return out_img else: box_loc[:,0] = box_loc[:,0] + dx box_loc[:,1]", "else: y_min, y_max = i[1], i[3] i[1] = h -", "out_img, box_loc #調整飽和度(飽和度通道加上隨機-N~N之值) def mod_saturation(image, box_loc=None, **kwargs): out_img = cv2.cvtColor(image,", "'rand_light': 30, 'rot_angle': 15, 'bordervalue': (127, 127, 127), 'zoom_out_value': 0.7,", "= box_loc[:,0] * scale + dx box_loc[:,1] = box_loc[:,1] *", "box_loc[:,3] + dy return out_img, box_loc.astype(np.int32) #隨機縮小 value~1倍 def random_zoom_out(image,", "box_count = 0 for obj in root.iter('object'): if box_count >=", "is None: return 
cv2.resize(image, (w, h)) else: box_loc[:,0] = box_loc[:,0]", "np.uint8) * kwargs['bordervalue'][0] out_img[dy: dy + h, dx: dx +", "box_loc is None: out_img = resize_img(image, **kwargs) else: out_img, box_loc", "bounding box location(x_min, y_min, x_max, y_max) ''' if box_loc is", "rot_box[...,0:4] = np.clip(rot_box[...,0:4], [0,0,0,0], [w-1, h-1, w-1, h-1]) return out_img,", "kwargs['rot_angle']) M = cv2.getRotationMatrix2D(center, angle, 1) out_img = cv2.warpAffine(image, M,", "#aug_img, bbox = rot_image(img, bbox, **default_args) #等比例resize至指定大小 #aug_img = resize_img(img,", "1 out_img = (out_img * 255).astype(np.uint8) if box_loc is None:", "* scale + dy box_loc[:,2] = box_loc[:,2] * scale +", "+ dx box_loc[:,3] = box_loc[:,3] * scale + dy return", "(w, h), borderValue = kwargs['bordervalue']) if box_loc is None: return", "= np.zeros((0, 5)) box_count = 0 for obj in root.iter('object'):", "= box_loc[:,3] * scale return cv2.resize(image, (w, h)), box_loc.astype(np.int32) #將樸片補至指定大小", "2) dy = int((kwargs['output_shape'][0] - h) / 2) out_img =", "kwargs.items(): print('key name: {}\\nvalue:{}\\n'.format(key, value)) #隨機選擇0~N個 image augmentation方法 def rand_aug_image(image,", "mod_light(img, bbox, **default_args) #水平翻轉 #aug_img = horizontal_flip(img, **default_args) #aug_img, bbox", "box_loc #調整飽和度(飽和度通道加上隨機-N~N之值) def mod_saturation(image, box_loc=None, **kwargs): out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)", "2, h // 2) angle = np.random.randint(-kwargs['rot_angle'], kwargs['rot_angle']) M =", "= loc.dot(np.transpose(M[:,0:2])) rot_loc = rot_loc + np.array(center) rot_box = np.hstack([np.min(rot_loc,", "scale + dy box_loc[:,2] = box_loc[:,2] * scale + dx", "rand_aug_image(image, box_loc=None, **kwargs): if box_loc is None: out_img = resize_img(image,", "xml.etree.cElementTree as ET from random import sample #default args: default_args", "= resize_img(img, **default_args) #aug_img, bbox = resize_img(img, bbox, **default_args) #補形狀至指定大小", "def sp_noise(image, box_loc=None, **kwargs): h, w = image.shape[0:2] noise =", "rand_aug_image(img, bbox, **default_args) print(bbox) draw_rect(aug_img, bbox) cv2.imshow('img', img) cv2.imshow('aug img',", "0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR) if box_loc is None: return out_img else:", "box_loc[:,2] * scale + dx box_loc[:,3] = box_loc[:,3] * scale", "mod_saturation(image, box_loc=None, **kwargs): out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,1] += np.random.randint(-kwargs['rand_saturation'],", "0) else: h = image.shape[0] for i in box_loc: if", "box_loc=None, **kwargs): out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,0] += np.random.randint(-kwargs['rand_hug'], kwargs['rand_hug'])", "h - y_min return cv2.flip(image, 0), box_loc #旋轉-n~n度 def rot_image(image,", "- y_min return cv2.flip(image, 0), box_loc #旋轉-n~n度 def rot_image(image, box_loc=None,", "0), box_loc #旋轉-n~n度 def rot_image(image, box_loc=None, **kwargs): ''' Args: box_loc:", "bounding box location(num box,(x_min, y_min, x_max, y_max, label)) ''' if", "**kwargs): h, w, _ = image.shape max_edge = max(kwargs['output_shape'][0], kwargs['output_shape'][1])", "load_csv(xml_path, max_boxes=4): tree = ET.parse(xml_path) root = tree.getroot() #location list", "if box_loc is None: return cv2.flip(image, 1) else: w =", "default_args = {'noise_prob': 0.1, 'gasuss_mean': 0, 'gasuss_var': 0.001, 'rand_hug': 30,", "+ dy box_loc[:,2] = box_loc[:,2] * scale + dx box_loc[:,3]", "out_img[:,:,1] += 
np.random.randint(-kwargs['rand_saturation'], kwargs['rand_saturation']) out_img = cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR)", "rot_box = np.hstack([np.min(rot_loc, axis=-2), np.max(rot_loc, axis=-2), box_loc[:, 4:5]]) rot_box =", "box_loc[:,3] * scale + dy return out_img, box_loc.astype(np.int32) #load csv", "= horizontal_flip(img, bbox, **default_args) #垂直翻轉 #aug_img = vertical_flip(img, **default_args) #aug_img,", "box_loc = resize_img(image, box_loc, **kwargs) #total augmentation function func_list =", "**default_args) #aug_img, bbox = rot_image(img, bbox, **default_args) #等比例resize至指定大小 #aug_img =", "img = cv2.imread('./00002.jpg') bbox = load_csv('./00002.xml') #黑點noise #aug_img = sp_noise(img,", "box location(x_min, y_min, x_max, y_max) ''' if box_loc is None:", "loc_list.astype(np.float32) #draw rectangle def draw_rect(image, box_loc): for i in box_loc:", "= i[1], i[3] i[1] = h - y_max i[3] =", "= padding_img(aug_img, **default_args) #aug_img, bbox = padding_img(aug_img, bbox, **default_args) #隨機縮小", "+ 0.5 out_img[out_img < 0] = 0 out_img[out_img > 1]", "out_img = np.ones((kwargs['output_shape'][0], kwargs['output_shape'][1], 3), np.uint8) * kwargs['bordervalue'][0] out_img[dy: dy", "aug_img = rand_aug_image(img, **default_args) #aug_img, bbox = rand_aug_image(img, bbox, **default_args)", "box_loc=None, **kwargs): ''' Args: box_loc: bounding box location(x_min, y_min, x_max,", "''' difficult = obj.find('difficult').text cls = obj.find('name').text if cls not", "return loc_list.astype(np.float32) #draw rectangle def draw_rect(image, box_loc): for i in", "= cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,1] += np.random.randint(-kwargs['rand_saturation'], kwargs['rand_saturation']) out_img = cv2.cvtColor(np.clip(out_img,", "y_max, 0])]) box_count += 1 return loc_list.astype(np.float32) #draw rectangle def", "loc = np.append(loc, loc[:, 3:4], axis=-1) loc = np.append(loc, loc[:,", "in kwargs.items(): print('key name: {}\\nvalue:{}\\n'.format(key, value)) #隨機選擇0~N個 image augmentation方法 def", "h)), box_loc.astype(np.int32) #將樸片補至指定大小 def padding_img(image, box_loc=None, **kwargs): h, w, _", "h, dx: dx + w] = cv2.resize(image, (w, h)) if", "= int((image.shape[1] - w) / 2) dy = int((image.shape[0] -", "return cv2.flip(image, 1) else: w = image.shape[1] for i in", "cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,2] += np.random.randint(-kwargs['rand_light'], kwargs['rand_light']) out_img = cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8),", "#total augmentation function func_list = [sp_noise, gasuss_noise, mod_hue, mod_saturation, mod_light,", "= (out_img * 255).astype(np.uint8) if box_loc is None: return out_img", "def rand_aug_image(image, box_loc=None, **kwargs): if box_loc is None: out_img =", "= 0 if box_loc is None: return out_img else: return", "out_img = (out_img * 255).astype(np.uint8) if box_loc is None: return", "box_loc[:,0] * scale + dx box_loc[:,1] = box_loc[:,1] * scale", "**kwargs) return out_img, box_loc if __name__ == \"__main__\": img =", "key, value in kwargs.items(): print('key name: {}\\nvalue:{}\\n'.format(key, value)) #隨機選擇0~N個 image", "return out_img else: box_loc[:,0] = box_loc[:,0] * scale + dx", "rot_loc = rot_loc + np.array(center) rot_box = np.hstack([np.min(rot_loc, axis=-2), np.max(rot_loc,", "out_img[dy: dy + h, dx: dx + w] = cv2.resize(image,", "= obj.find('name').text if cls not in classes or int(difficult) ==", "= 0 for obj in root.iter('object'): if box_count >= max_boxes:", "- y_max i[3] = 
h - y_min return cv2.flip(image, 0),", "* scale + dx box_loc[:,3] = box_loc[:,3] * scale +", "is None: return out_img else: return out_img, box_loc #調整亮度(亮度通道加上隨機-N~N之值) def", "i[0] = w - x_max i[2] = w - x_min", "= np.append(loc, loc[:, 2:3], axis=-1) loc = np.append(loc, loc[:, 1:2],", "draw_rect(image, box_loc): for i in box_loc: cv2.rectangle(image, (int(i[0]), int(i[1])), (int(i[2]),", "take function take_func = sample(func_list, np.random.randint(kwargs['take_value'])) for func in take_func:", "= cv2.warpAffine(image, M, (w, h), borderValue = kwargs['bordervalue']) if box_loc", "return out_img else: return out_img, box_loc #調整亮度(亮度通道加上隨機-N~N之值) def mod_light(image, box_loc=None,", "padding_img(out_img, **kwargs) return out_img else: out_img, box_loc = padding_img(out_img, box_loc,", "the following encoding: utf-8 import cv2 import numpy as np", "import sample #default args: default_args = {'noise_prob': 0.1, 'gasuss_mean': 0,", "= np.random.randint(-kwargs['rot_angle'], kwargs['rot_angle']) M = cv2.getRotationMatrix2D(center, angle, 1) out_img =", "def resize_img(image, box_loc=None, **kwargs): h, w, _ = image.shape max_edge", "**kwargs) #total augmentation function func_list = [sp_noise, gasuss_noise, mod_hue, mod_saturation,", "#aug_img, bbox = mod_hue(img, bbox, **default_args) #調整saturation #aug_img = mod_saturation(img,", "cv2.resize(image, (w, h)), box_loc.astype(np.int32) #將樸片補至指定大小 def padding_img(image, box_loc=None, **kwargs): h,", "np.max(rot_loc, axis=-2), box_loc[:, 4:5]]) rot_box = np.floor(rot_box) rot_box[...,0:4] = np.clip(rot_box[...,0:4],", "= np.vstack([loc_list, np.array([x_min, y_min, x_max, y_max, 0])]) box_count += 1", "< kwargs['noise_prob']] = 0 if box_loc is None: return out_img", "(w, h)) if box_loc is None: return out_img else: box_loc[:,0]", "rectangle def draw_rect(image, box_loc): for i in box_loc: cv2.rectangle(image, (int(i[0]),", "**default_args) #調整saturation #aug_img = mod_saturation(img, **default_args) #aug_img, bbox = mod_saturation(img,", "box_loc.astype(np.int32) #load csv data def load_csv(xml_path, max_boxes=4): tree = ET.parse(xml_path)", "func(out_img, box_loc, **kwargs) if box_loc is None: out_img = padding_img(out_img,", "0])]) box_count += 1 return loc_list.astype(np.float32) #draw rectangle def draw_rect(image,", "return out_img, box_loc #高斯noise def gasuss_noise(image, box_loc=None, **kwargs): out_img =", "None: return out_img else: box_loc[:,0] = box_loc[:,0] * scale +", "if box_loc is None: out_img = resize_img(image, **kwargs) else: out_img,", "= min( max_edge / h, max_edge / w) h =", "box_loc is None: return cv2.resize(image, (w, h)) else: box_loc[:,0] =", "box_loc: cv2.rectangle(image, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])), (0, 255, 0), 4)", "bordervalue: 空白處補的值 ''' h, w, _ = image.shape center =", "axis=-1) loc = np.append(loc, loc[:, 1:2], axis=-1) loc = loc.reshape(-1,", "= loc - np.array(center) rot_loc = loc.dot(np.transpose(M[:,0:2])) rot_loc = rot_loc", "mod_hue(img, **default_args) #aug_img, bbox = mod_hue(img, bbox, **default_args) #調整saturation #aug_img", "''' Args: box_loc: bounding box location(x_min, y_min, x_max, y_max) '''", "#等比例resize至指定大小 #aug_img = resize_img(img, **default_args) #aug_img, bbox = resize_img(img, bbox,", "h-1, w-1, h-1]) return out_img, rot_box #等比例縮放影像 def resize_img(image, box_loc=None,", "#aug_img, bbox = random_zoom_out(img, bbox, **default_args) #隨機選擇augmentation方法 aug_img = rand_aug_image(img,", "def load_csv(xml_path, max_boxes=4): tree = ET.parse(xml_path) root = tree.getroot() 
#location", "loc[:, 3:4], axis=-1) loc = np.append(loc, loc[:, 2:3], axis=-1) loc", "2) loc = loc - np.array(center) rot_loc = loc.dot(np.transpose(M[:,0:2])) rot_loc", "None: return out_img else: return out_img, box_loc #調整飽和度(飽和度通道加上隨機-N~N之值) def mod_saturation(image,", "#aug_img, bbox = rand_aug_image(img, bbox, **default_args) print(bbox) draw_rect(aug_img, bbox) cv2.imshow('img',", "sp_noise(image, box_loc=None, **kwargs): h, w = image.shape[0:2] noise = np.random.rand(h,w)", "else: return out_img, box_loc #高斯noise def gasuss_noise(image, box_loc=None, **kwargs): out_img", "borderValue = kwargs['bordervalue']) if box_loc is None: return out_img else:", "int(difficult) == 1: continue cls_id = classes.index(cls) ''' loc =", "w // 2, h // 2) angle = np.random.randint(-kwargs['rot_angle'], kwargs['rot_angle'])", "image.shape scale = np.random.uniform(kwargs['zoom_out_value'], 1) h = int(h * scale)", "cls_id = classes.index(cls) ''' loc = obj.find('bndbox') x_min = int(loc.find('xmin').text)", "cv2.rectangle(image, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])), (0, 255, 0), 4) def", "= gasuss_noise(img, **default_args) #aug_img, bbox = gasuss_noise(img, bbox, **default_args) #調整Hue", "+ dx box_loc[:,1] = box_loc[:,1] + dy box_loc[:,2] = box_loc[:,2]", "* scale + dy return out_img, box_loc.astype(np.int32) #load csv data", "#aug_img, bbox = horizontal_flip(img, bbox, **default_args) #垂直翻轉 #aug_img = vertical_flip(img,", "return out_img else: return out_img, box_loc #水平翻轉 def horizontal_flip(image, box_loc=None,", "scale = min( max_edge / h, max_edge / w) h", "out_img = out_img + noise + 0.5 out_img[out_img < 0]", "= rot_image(img, bbox, **default_args) #等比例resize至指定大小 #aug_img = resize_img(img, **default_args) #aug_img,", "= 1 out_img = (out_img * 255).astype(np.uint8) if box_loc is", "np.append(loc, loc[:, 1:2], axis=-1) loc = loc.reshape(-1, 4, 2) loc", "h, w, _ = image.shape scale = np.random.uniform(kwargs['zoom_out_value'], 1) h", "box_loc #高斯noise def gasuss_noise(image, box_loc=None, **kwargs): out_img = (image /", "#location list loc_list = np.zeros((0, 5)) box_count = 0 for", "def mod_light(image, box_loc=None, **kwargs): out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,2] +=", "y_max = int(loc.find('ymax').text) loc_list = np.vstack([loc_list, np.array([x_min, y_min, x_max, y_max,", "#垂直翻轉 #aug_img = vertical_flip(img, **default_args) #aug_img, bbox = vertical_flip(img, bbox,", "angle = np.random.randint(-kwargs['rot_angle'], kwargs['rot_angle']) M = cv2.getRotationMatrix2D(center, angle, 1) out_img", "= rand_aug_image(img, bbox, **default_args) print(bbox) draw_rect(aug_img, bbox) cv2.imshow('img', img) cv2.imshow('aug", "0: break else: y_min, y_max = i[1], i[3] i[1] =", "0.5 out_img[out_img < 0] = 0 out_img[out_img > 1] =", "from random import sample #default args: default_args = {'noise_prob': 0.1,", "w) / 2) dy = int((image.shape[0] - h) / 2)", "def mod_saturation(image, box_loc=None, **kwargs): out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,1] +=", "= func(out_img, **kwargs) else: out_img, box_loc = func(out_img, box_loc, **kwargs)", "+ dy box_loc[:,2] = box_loc[:,2] + dx box_loc[:,3] = box_loc[:,3]", "x_max, y_max, label)) ''' if box_loc is None: return cv2.flip(image,", "max(kwargs['output_shape'][0], kwargs['output_shape'][1]) scale = min( max_edge / h, max_edge /", "5)) box_count = 0 for obj in root.iter('object'): if box_count", "bounding box location(num box,(x_min, y_min, x_max, y_max, label)) rot: 
要選轉的範圍", "= int(h * scale) w = int(w * scale) if", "#aug_img, bbox = gasuss_noise(img, bbox, **default_args) #調整Hue #aug_img = mod_hue(img,", "+= np.random.randint(-kwargs['rand_saturation'], kwargs['rand_saturation']) out_img = cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR) if", "cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,0] += np.random.randint(-kwargs['rand_hug'], kwargs['rand_hug']) out_img = cv2.cvtColor(np.clip(out_img, 0, 180).astype(np.uint8),", "box_loc if __name__ == \"__main__\": img = cv2.imread('./00002.jpg') bbox =", "np.random.randint(-kwargs['rand_hug'], kwargs['rand_hug']) out_img = cv2.cvtColor(np.clip(out_img, 0, 180).astype(np.uint8), cv2.COLOR_HSV2BGR) if box_loc", "if i[2] == 0: break else: x_min, x_max = i[0],", "x_max, y_max, 0])]) box_count += 1 return loc_list.astype(np.float32) #draw rectangle", "box_loc[:,0] = box_loc[:,0] * scale box_loc[:,1] = box_loc[:,1] * scale", "255).astype(np.uint8), cv2.COLOR_HSV2BGR) if box_loc is None: return out_img else: return", "i[3] == 0: break else: y_min, y_max = i[1], i[3]", "kwargs['output_shape'][1], 3), np.uint8) * kwargs['bordervalue'][0] out_img[dy: dy + h, dx:", "padding_img(out_img, box_loc, **kwargs) return out_img, box_loc if __name__ == \"__main__\":", "= int((image.shape[0] - h) / 2) out_img = np.ones(image.shape, np.uint8)", "i[3] = h - y_min return cv2.flip(image, 0), box_loc #旋轉-n~n度", "resize_img(image, box_loc=None, **kwargs): h, w, _ = image.shape max_edge =", "np.ones(image.shape, np.uint8) * kwargs['bordervalue'][0] out_img[dy: dy + h, dx: dx", "vertical_flip(image, box_loc=None, **kwargs): ''' Args: box_loc: bounding box location(num box,(x_min,", "#補形狀至指定大小 #aug_img = padding_img(aug_img, **default_args) #aug_img, bbox = padding_img(aug_img, bbox,", ">= max_boxes: break ''' difficult = obj.find('difficult').text cls = obj.find('name').text", "value in kwargs.items(): print('key name: {}\\nvalue:{}\\n'.format(key, value)) #隨機選擇0~N個 image augmentation方法", "i[3] i[1] = h - y_max i[3] = h -", "**kwargs): ''' Args: box_loc: bounding box location(num box,(x_min, y_min, x_max,", "is None: return cv2.flip(image, 0) else: h = image.shape[0] for", "- w) / 2) dy = int((kwargs['output_shape'][0] - h) /", "box_loc[:,2] = box_loc[:,2] * scale + dx box_loc[:,3] = box_loc[:,3]", "= int(loc.find('xmin').text) y_min = int(loc.find('ymin').text) x_max = int(loc.find('xmax').text) y_max =", "cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,2] += np.random.randint(-kwargs['rand_light'], kwargs['rand_light']) out_img = cv2.cvtColor(np.clip(out_img, 0,", "+ np.array(center) rot_box = np.hstack([np.min(rot_loc, axis=-2), np.max(rot_loc, axis=-2), box_loc[:, 4:5]])", "label)) rot: 要選轉的範圍 bordervalue: 空白處補的值 ''' h, w, _ =", "= image.shape scale = np.random.uniform(kwargs['zoom_out_value'], 1) h = int(h *", "box_count >= max_boxes: break ''' difficult = obj.find('difficult').text cls =", "0.001, 'rand_hug': 30, 'rand_saturation':30, 'rand_light': 30, 'rot_angle': 15, 'bordervalue': (127,", "w) h = int(h * scale) w = int(w *", "i[2] i[0] = w - x_max i[2] = w -", "box_loc #調整亮度(亮度通道加上隨機-N~N之值) def mod_light(image, box_loc=None, **kwargs): out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)", "func(out_img, **kwargs) else: out_img, box_loc = func(out_img, box_loc, **kwargs) if", "int(h * scale) w = int(w * scale) dx =", "return out_img, rot_box #等比例縮放影像 def resize_img(image, box_loc=None, **kwargs): h, w,", "return out_img, box_loc #調整彩度(彩度通道加上隨機-N~N之值) def 
mod_hue(image, box_loc=None, **kwargs): out_img =", "= box_loc[:,2] * scale + dx box_loc[:,3] = box_loc[:,3] *", "( w // 2, h // 2) angle = np.random.randint(-kwargs['rot_angle'],", "''' Args: box_loc: bounding box location(num box,(x_min, y_min, x_max, y_max,", "M = cv2.getRotationMatrix2D(center, angle, 1) out_img = cv2.warpAffine(image, M, (w,", "out_img else: return out_img, box_loc #調整亮度(亮度通道加上隨機-N~N之值) def mod_light(image, box_loc=None, **kwargs):", "if box_count >= max_boxes: break ''' difficult = obj.find('difficult').text cls", "box_loc is None: return out_img else: return out_img, box_loc #高斯noise", "#rand take function take_func = sample(func_list, np.random.randint(kwargs['take_value'])) for func in", "+ dx box_loc[:,3] = box_loc[:,3] + dy return out_img, box_loc.astype(np.int32)", "#調整Hue #aug_img = mod_hue(img, **default_args) #aug_img, bbox = mod_hue(img, bbox,", "box_loc[:,1] * scale box_loc[:,2] = box_loc[:,2] * scale box_loc[:,3] =", "box_loc[:,1] + dy box_loc[:,2] = box_loc[:,2] + dx box_loc[:,3] =", "int(loc.find('ymax').text) loc_list = np.vstack([loc_list, np.array([x_min, y_min, x_max, y_max, 0])]) box_count", "'gasuss_var': 0.001, 'rand_hug': 30, 'rand_saturation':30, 'rand_light': 30, 'rot_angle': 15, 'bordervalue':", "box_loc: bounding box location(num box,(x_min, y_min, x_max, y_max, label)) '''", "**default_args) #調整light #aug_img = mod_light(img, **default_args) #aug_img, bbox = mod_light(img,", "(0, 255, 0), 4) def print_args(**kwargs): for key, value in", "y_min return cv2.flip(image, 0), box_loc #旋轉-n~n度 def rot_image(image, box_loc=None, **kwargs):", "h), borderValue = kwargs['bordervalue']) if box_loc is None: return out_img", "0:1], axis=-1) loc = np.append(loc, loc[:, 3:4], axis=-1) loc =", "+ dy return out_img, box_loc.astype(np.int32) #隨機縮小 value~1倍 def random_zoom_out(image, box_loc=None,", "box_count += 1 return loc_list.astype(np.float32) #draw rectangle def draw_rect(image, box_loc):", "bbox = random_zoom_out(img, bbox, **default_args) #隨機選擇augmentation方法 aug_img = rand_aug_image(img, **default_args)", "utf-8 import cv2 import numpy as np import xml.etree.cElementTree as", "**default_args) #aug_img, bbox = mod_light(img, bbox, **default_args) #水平翻轉 #aug_img =", "= cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,0] += np.random.randint(-kwargs['rand_hug'], kwargs['rand_hug']) out_img = cv2.cvtColor(np.clip(out_img,", "in box_loc: if i[3] == 0: break else: y_min, y_max", "np.hstack([np.min(rot_loc, axis=-2), np.max(rot_loc, axis=-2), box_loc[:, 4:5]]) rot_box = np.floor(rot_box) rot_box[...,0:4]", "= np.append(loc, loc[:, 3:4], axis=-1) loc = np.append(loc, loc[:, 2:3],", "dy box_loc[:,2] = box_loc[:,2] + dx box_loc[:,3] = box_loc[:,3] +", "h, w, _ = image.shape dx = int((kwargs['output_shape'][1] - w)", "**kwargs) if box_loc is None: out_img = padding_img(out_img, **kwargs) return", "i[0], i[2] i[0] = w - x_max i[2] = w", "#aug_img, bbox = resize_img(img, bbox, **default_args) #補形狀至指定大小 #aug_img = padding_img(aug_img,", "h, w = image.shape[0:2] noise = np.random.rand(h,w) out_img = image.copy()", "resize_img(img, **default_args) #aug_img, bbox = resize_img(img, bbox, **default_args) #補形狀至指定大小 #aug_img", "**kwargs) return out_img else: out_img, box_loc = padding_img(out_img, box_loc, **kwargs)", "box_loc #旋轉-n~n度 def rot_image(image, box_loc=None, **kwargs): ''' Args: box_loc: bounding", "obj.find('name').text if cls not in classes or int(difficult) == 1:", "np.append(loc, loc[:, 3:4], axis=-1) loc = np.append(loc, loc[:, 2:3], 
axis=-1)", "box_loc.astype(np.int32) #隨機縮小 value~1倍 def random_zoom_out(image, box_loc=None, **kwargs): h, w, _", "w) / 2) dy = int((kwargs['output_shape'][0] - h) / 2)", "int(w * scale) if box_loc is None: return cv2.resize(image, (w,", "0, 180).astype(np.uint8), cv2.COLOR_HSV2BGR) if box_loc is None: return out_img else:", "box_loc: if i[3] == 0: break else: y_min, y_max =", "4:5]]) rot_box = np.floor(rot_box) rot_box[...,0:4] = np.clip(rot_box[...,0:4], [0,0,0,0], [w-1, h-1,", "np.ones((kwargs['output_shape'][0], kwargs['output_shape'][1], 3), np.uint8) * kwargs['bordervalue'][0] out_img[dy: dy + h,", "cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR) if box_loc is None: return out_img", "= box_loc[:,1] * scale + dy box_loc[:,2] = box_loc[:,2] *", "np.random.rand(h,w) out_img = image.copy() out_img[noise < kwargs['noise_prob']] = 0 if", "#隨機選擇augmentation方法 aug_img = rand_aug_image(img, **default_args) #aug_img, bbox = rand_aug_image(img, bbox,", "obj in root.iter('object'): if box_count >= max_boxes: break ''' difficult", "= classes.index(cls) ''' loc = obj.find('bndbox') x_min = int(loc.find('xmin').text) y_min", "mod_light, horizontal_flip, vertical_flip, rot_image, random_zoom_out] #rand take function take_func =", "mod_light(img, **default_args) #aug_img, bbox = mod_light(img, bbox, **default_args) #水平翻轉 #aug_img", "center = ( w // 2, h // 2) angle", "axis=-1) loc = np.append(loc, loc[:, 2:3], axis=-1) loc = np.append(loc,", "gasuss_noise(img, bbox, **default_args) #調整Hue #aug_img = mod_hue(img, **default_args) #aug_img, bbox", "**default_args) #等比例resize至指定大小 #aug_img = resize_img(img, **default_args) #aug_img, bbox = resize_img(img,", "if box_loc is None: return out_img else: box_loc[:,0] = box_loc[:,0]", "loc = loc - np.array(center) rot_loc = loc.dot(np.transpose(M[:,0:2])) rot_loc =", "w = int(w * scale) dx = int((image.shape[1] - w)", "vertical_flip, rot_image, random_zoom_out] #rand take function take_func = sample(func_list, np.random.randint(kwargs['take_value']))", "out_img[out_img < 0] = 0 out_img[out_img > 1] = 1", "padding_img(aug_img, bbox, **default_args) #隨機縮小 N~1倍 #aug_img = random_zoom_out(img, **default_args) #aug_img,", "+ w] = cv2.resize(image, (w, h)) if box_loc is None:", "- np.array(center) rot_loc = loc.dot(np.transpose(M[:,0:2])) rot_loc = rot_loc + np.array(center)", "label)) ''' if box_loc is None: return cv2.flip(image, 0) else:", "out_img = np.ones(image.shape, np.uint8) * kwargs['bordervalue'][0] out_img[dy: dy + h,", "h) / 2) out_img = np.ones(image.shape, np.uint8) * kwargs['bordervalue'][0] out_img[dy:", "N~1倍 #aug_img = random_zoom_out(img, **default_args) #aug_img, bbox = random_zoom_out(img, bbox,", "* scale box_loc[:,2] = box_loc[:,2] * scale box_loc[:,3] = box_loc[:,3]", "= h - y_min return cv2.flip(image, 0), box_loc #旋轉-n~n度 def", "w-1, h-1]) return out_img, rot_box #等比例縮放影像 def resize_img(image, box_loc=None, **kwargs):", "**default_args) #水平翻轉 #aug_img = horizontal_flip(img, **default_args) #aug_img, bbox = horizontal_flip(img,", "None: return out_img else: return out_img, box_loc #調整彩度(彩度通道加上隨機-N~N之值) def mod_hue(image,", "horizontal_flip(img, **default_args) #aug_img, bbox = horizontal_flip(img, bbox, **default_args) #垂直翻轉 #aug_img", "x_max, y_max, label)) rot: 要選轉的範圍 bordervalue: 空白處補的值 ''' h, w,", "'rand_hug': 30, 'rand_saturation':30, 'rand_light': 30, 'rot_angle': 15, 'bordervalue': (127, 127,", "1) h = int(h * scale) w = int(w *", "gasuss_noise, mod_hue, mod_saturation, mod_light, horizontal_flip, 
vertical_flip, rot_image, random_zoom_out] #rand take", "else: return out_img, box_loc #調整亮度(亮度通道加上隨機-N~N之值) def mod_light(image, box_loc=None, **kwargs): out_img", "out_img = func(out_img, **kwargs) else: out_img, box_loc = func(out_img, box_loc,", "encoding: utf-8 import cv2 import numpy as np import xml.etree.cElementTree", "= obj.find('difficult').text cls = obj.find('name').text if cls not in classes", "csv data def load_csv(xml_path, max_boxes=4): tree = ET.parse(xml_path) root =", "box_loc = padding_img(out_img, box_loc, **kwargs) return out_img, box_loc if __name__", "= int(loc.find('ymin').text) x_max = int(loc.find('xmax').text) y_max = int(loc.find('ymax').text) loc_list =", "= int(loc.find('ymax').text) loc_list = np.vstack([loc_list, np.array([x_min, y_min, x_max, y_max, 0])])", "rand_aug_image(img, **default_args) #aug_img, bbox = rand_aug_image(img, bbox, **default_args) print(bbox) draw_rect(aug_img,", "> 1] = 1 out_img = (out_img * 255).astype(np.uint8) if", "box_loc[:,2] * scale box_loc[:,3] = box_loc[:,3] * scale return cv2.resize(image,", "out_img = image.copy() out_img[noise < kwargs['noise_prob']] = 0 if box_loc", "image.shape center = ( w // 2, h // 2)", "dy box_loc[:,2] = box_loc[:,2] * scale + dx box_loc[:,3] =", "is None: out_img = padding_img(out_img, **kwargs) return out_img else: out_img,", "= cv2.imread('./00002.jpg') bbox = load_csv('./00002.xml') #黑點noise #aug_img = sp_noise(img, **default_args)", "bbox, **default_args) #調整Hue #aug_img = mod_hue(img, **default_args) #aug_img, bbox =", "def rot_image(image, box_loc=None, **kwargs): ''' Args: box_loc: bounding box location(num", "box_loc is None: return out_img else: return out_img, box_loc #調整彩度(彩度通道加上隨機-N~N之值)", "= 0 out_img[out_img > 1] = 1 out_img = (out_img", "**kwargs): out_img = (image / 255.) 
- 0.5 noise =", "dx box_loc[:,3] = box_loc[:,3] + dy return out_img, box_loc.astype(np.int32) #隨機縮小", "def horizontal_flip(image, box_loc=None, **kwargs): ''' Args: box_loc: bounding box location(x_min,", "= int((kwargs['output_shape'][0] - h) / 2) out_img = np.ones((kwargs['output_shape'][0], kwargs['output_shape'][1],", "h) / 2) out_img = np.ones((kwargs['output_shape'][0], kwargs['output_shape'][1], 3), np.uint8) *", "= image.shape[0:2] noise = np.random.rand(h,w) out_img = image.copy() out_img[noise <", "kwargs['output_shape'][1]) scale = min( max_edge / h, max_edge / w)", "= padding_img(out_img, box_loc, **kwargs) return out_img, box_loc if __name__ ==", "1:2], axis=-1) loc = loc.reshape(-1, 4, 2) loc = loc", "#等比例縮放影像 def resize_img(image, box_loc=None, **kwargs): h, w, _ = image.shape", "box_loc=None, **kwargs): out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) out_img[:,:,1] += np.random.randint(-kwargs['rand_saturation'], kwargs['rand_saturation'])", "def vertical_flip(image, box_loc=None, **kwargs): ''' Args: box_loc: bounding box location(num", "np.random.randint(kwargs['take_value'])) for func in take_func: if box_loc is None: out_img", "bbox, **default_args) #調整saturation #aug_img = mod_saturation(img, **default_args) #aug_img, bbox =", "return out_img else: return out_img, box_loc #調整飽和度(飽和度通道加上隨機-N~N之值) def mod_saturation(image, box_loc=None,", "要選轉的範圍 bordervalue: 空白處補的值 ''' h, w, _ = image.shape center", "kwargs['rand_light']) out_img = cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR) if box_loc is", "180).astype(np.uint8), cv2.COLOR_HSV2BGR) if box_loc is None: return out_img else: return", "* scale return cv2.resize(image, (w, h)), box_loc.astype(np.int32) #將樸片補至指定大小 def padding_img(image,", "== 0: break else: x_min, x_max = i[0], i[2] i[0]", "kwargs['bordervalue']) if box_loc is None: return out_img else: loc =", "h = image.shape[0] for i in box_loc: if i[3] ==", "int(w * scale) dx = int((image.shape[1] - w) / 2)", "**default_args) #aug_img, bbox = rand_aug_image(img, bbox, **default_args) print(bbox) draw_rect(aug_img, bbox)", "list loc_list = np.zeros((0, 5)) box_count = 0 for obj", "= w - x_max i[2] = w - x_min return", "else: out_img, box_loc = resize_img(image, box_loc, **kwargs) #total augmentation function", "#隨機縮小 value~1倍 def random_zoom_out(image, box_loc=None, **kwargs): h, w, _ =", "box_loc is None: out_img = padding_img(out_img, **kwargs) return out_img else:", "#aug_img = rot_image(img, **default_args) #aug_img, bbox = rot_image(img, bbox, **default_args)", "#旋轉角度 #aug_img = rot_image(img, **default_args) #aug_img, bbox = rot_image(img, bbox,", "rot_image(image, box_loc=None, **kwargs): ''' Args: box_loc: bounding box location(num box,(x_min,", "out_img, box_loc #高斯noise def gasuss_noise(image, box_loc=None, **kwargs): out_img = (image", "out_img[out_img > 1] = 1 out_img = (out_img * 255).astype(np.uint8)", "sample(func_list, np.random.randint(kwargs['take_value'])) for func in take_func: if box_loc is None:", "x_min, x_max = i[0], i[2] i[0] = w - x_max", "in box_loc: if i[2] == 0: break else: x_min, x_max", "def padding_img(image, box_loc=None, **kwargs): h, w, _ = image.shape dx", "None: out_img = padding_img(out_img, **kwargs) return out_img else: out_img, box_loc", "box_loc is None: return cv2.flip(image, 0) else: h = image.shape[0]", "= w - x_min return cv2.flip(image, 1), box_loc #垂直翻轉 def", "- w) / 2) dy = int((image.shape[0] - h) /", "loc = np.append(loc, loc[:, 2:3], axis=-1) loc = np.append(loc, 
loc[:,", "box_loc: if i[2] == 0: break else: x_min, x_max =", "def draw_rect(image, box_loc): for i in box_loc: cv2.rectangle(image, (int(i[0]), int(i[1])),", "Python file uses the following encoding: utf-8 import cv2 import", "== 1: continue cls_id = classes.index(cls) ''' loc = obj.find('bndbox')", "/ 2) out_img = np.ones(image.shape, np.uint8) * kwargs['bordervalue'][0] out_img[dy: dy", "**default_args) #aug_img, bbox = sp_noise(img, bbox, **default_args) #gasuss_noise #aug_img =", "= rot_loc + np.array(center) rot_box = np.hstack([np.min(rot_loc, axis=-2), np.max(rot_loc, axis=-2),", "out_img else: return out_img, box_loc #水平翻轉 def horizontal_flip(image, box_loc=None, **kwargs):", "is None: return cv2.flip(image, 1) else: w = image.shape[1] for", "1), box_loc #垂直翻轉 def vertical_flip(image, box_loc=None, **kwargs): ''' Args: box_loc:", "if box_loc is None: out_img = padding_img(out_img, **kwargs) return out_img", "func in take_func: if box_loc is None: out_img = func(out_img,", "cv2 import numpy as np import xml.etree.cElementTree as ET from", "+ h, dx: dx + w] = cv2.resize(image, (w, h))", "np.vstack([loc_list, np.array([x_min, y_min, x_max, y_max, 0])]) box_count += 1 return", "scale) w = int(w * scale) dx = int((image.shape[1] -", "= rand_aug_image(img, **default_args) #aug_img, bbox = rand_aug_image(img, bbox, **default_args) print(bbox)", "h = int(h * scale) w = int(w * scale)", "box_loc=None, **kwargs): if box_loc is None: out_img = resize_img(image, **kwargs)", "image.shape) out_img = out_img + noise + 0.5 out_img[out_img <", "out_img, box_loc #調整亮度(亮度通道加上隨機-N~N之值) def mod_light(image, box_loc=None, **kwargs): out_img = cv2.cvtColor(image,", "out_img + noise + 0.5 out_img[out_img < 0] = 0", "h - y_max i[3] = h - y_min return cv2.flip(image,", "[0,0,0,0], [w-1, h-1, w-1, h-1]) return out_img, rot_box #等比例縮放影像 def", "in classes or int(difficult) == 1: continue cls_id = classes.index(cls)", "#隨機縮小 N~1倍 #aug_img = random_zoom_out(img, **default_args) #aug_img, bbox = random_zoom_out(img,", "= box_loc[:,0:4].copy() loc = np.append(loc, loc[:, 0:1], axis=-1) loc =", "resize_img(img, bbox, **default_args) #補形狀至指定大小 #aug_img = padding_img(aug_img, **default_args) #aug_img, bbox", "box_loc.astype(np.int32) #將樸片補至指定大小 def padding_img(image, box_loc=None, **kwargs): h, w, _ =" ]
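# ------------------------------------------------------------------
# Usage sketch (an assumption, not part of the original script): one
# plausible way to batch-augment a folder of image/XML pairs with the
# functions above. The directory names 'data' and 'aug' and the helper
# name augment_folder are hypothetical placeholders; rand_aug_image
# keeps the boxes in sync with the augmented image, as in the demo.
import os

def augment_folder(img_dir='data', out_dir='aug'):
    os.makedirs(out_dir, exist_ok=True)
    for name in os.listdir(img_dir):
        if not name.endswith('.jpg'):
            continue
        stem = os.path.splitext(name)[0]
        img = cv2.imread(os.path.join(img_dir, name))
        boxes = load_csv(os.path.join(img_dir, stem + '.xml'))
        # resize, random augmentations, then padding, with boxes transformed alongside
        aug_img, aug_boxes = rand_aug_image(img, boxes, **default_args)
        cv2.imwrite(os.path.join(out_dir, name), aug_img)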
#!/usr/bin/env python3
"""
Author : <NAME> <<EMAIL>>
Date   : 2021-12-15
Purpose: Working with lists
"""

import argparse


# --------------------------------------------------
def get_args():
    """Get command-line arguments"""

    parser = argparse.ArgumentParser(
        description="Working with lists",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument("items",
                        type=str,
                        nargs="+",
                        metavar="str",
                        help="item(s) to bring")

    parser.add_argument("-s",
                        "--sorted",
                        help="a boolean flag",
                        action="store_true")

    return parser.parse_args()


# --------------------------------------------------
def main():
    """The main function: formatting and printing the output"""

    args = get_args()
    sort_flag = args.sorted
    items = args.items

    if sort_flag:
        items = sorted(items)

    if len(items) == 1:
        print(f"You are bringing {items[0]}.")
    elif len(items) < 3:
        items.insert(-1, "and")
        print(f"You are bringing {' '.join(items)}.")
    else:
        # print(items)
        last = items[-1]
        and_last = "and " + last
        items[-1] = and_last
        # print(items)
        print(f"You are bringing {', '.join(items)}.")


# --------------------------------------------------
if __name__ == "__main__":
    main()
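# Example invocations (the filename lists.py is hypothetical; the outputs
# follow directly from the three branches in main() above):
#
#   $ python3 lists.py chips
#   You are bringing chips.
#   $ python3 lists.py chips salsa
#   You are bringing chips and salsa.
#   $ python3 lists.py salsa soda chips --sorted
#   You are bringing chips, salsa, and soda.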
<filename>triangle.py
side_a = int(input("Enter the first side(a):"))
side_b = int(input("Enter the second side(b):"))
side_c = int(input("Enter the third side(c):"))
if side_a == side_b and side_a == side_c:
    print("The triangle is an equilateral triangle.")
elif side_a == side_b or side_a == side_c or side_b == side_c:
    print("The triangle is an isosceles triangle.")
else:
    print("The triangle is a scalene triangle.")
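# Sample session (input values are illustrative); with exactly two equal
# sides the elif branch fires:
#
#   Enter the first side(a):5
#   Enter the second side(b):5
#   Enter the third side(c):3
#   The triangle is an isosceles triangle.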
<reponame>ktmud/david
# -*- coding: utf-8 -*-
from flask import Blueprint, request
from david.lib.template import st

from .model import Artist

bp = Blueprint('artist', __name__)


@bp.app_template_global('artists')
def artists():
    return Artist.query.all()


@bp.route('/artist/<uid>/')
def intro(uid):
    artist = Artist.get_or_404(uid)
    return st('modules/artist/show.html', **locals())


@bp.route('/artist/<uid>/detail')
def detail(uid):
    artist = Artist.get_or_404(uid)
    return st('modules/artist/detailed.html', **locals())
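# Minimal wiring sketch: how a blueprint like this is typically mounted on
# a Flask app. The bare Flask(__name__) application below is an assumption
# for illustration, not necessarily how the david project builds its app.
from flask import Flask

app = Flask(__name__)
app.register_blueprint(bp)  # exposes /artist/<uid>/ and /artist/<uid>/detail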
import numpy as np
import cv2
import os
import json
import glob
from PIL import Image, ImageDraw

plate_diameter = 25    # cm
plate_depth = 1.5      # cm
plate_thickness = 0.2  # cm


def Max(x, y):
    if (x >= y):
        return x
    else:
        return y


def polygons_to_mask(img_shape, polygons):
    # Rasterize a polygon into a boolean mask of the given shape
    mask = np.zeros(img_shape, dtype=np.uint8)
    mask = Image.fromarray(mask)
    xy = list(map(tuple, polygons))
    ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)
    mask = np.array(mask, dtype=bool)
    return mask


def mask2box(mask):
    # Bounding box [left, top, right, bottom] of the non-zero mask pixels
    index = np.argwhere(mask == 1)
    rows = index[:, 0]
    clos = index[:, 1]
    left_top_r = np.min(rows)
    left_top_c = np.min(clos)
    right_bottom_r = np.max(rows)
    right_bottom_c = np.max(clos)
    return [left_top_c, left_top_r, right_bottom_c, right_bottom_r]


def get_bbox(points, h, w):
    polygons = points
    mask = polygons_to_mask([h, w], polygons)
    return mask2box(mask)


def get_scale(points, img, lowest):
    # Calibrate cm-per-pixel and cm-per-grey-level from the plate outline
    bbox = get_bbox(points, img.shape[0], img.shape[1])
    diameter = (bbox[2]-bbox[0]+1+bbox[3]-bbox[1]+1)/2
    len_per_pix = plate_diameter/float(diameter)
    avg = 0
    k = 0
    for point in points:
        avg += img[point[1]][point[0]]
        k += 1
    avg = avg/float(k)
    depth = lowest - avg
    depth_per_pix = plate_depth/depth
    return len_per_pix, depth_per_pix


def cal_volume(points, img, len_per_pix, depth_per_pix, lowest):
    # Integrate depth over every pixel inside the food polygon
    volume = 0.0
    bbox = get_bbox(points, img.shape[0], img.shape[1])
    points = np.array(points)
    shape = points.shape
    points = points.reshape(shape[0], 1, shape[1])
    for i in range(bbox[0], bbox[2]+1):
        for j in range(bbox[1], bbox[3]+1):
            if (cv2.pointPolygonTest(points, (i, j), False) >= 0):
                volume += Max(0, (lowest - img[j][i]) * depth_per_pix - plate_thickness) * len_per_pix * len_per_pix
    return volume


def get_volume(img, json_path):
    lowest = np.max(img)
    vol_dict = {}
    # print(lowest)
    len_per_pix = 0.0
    depth_per_pix = 0.0
    with open(json_path, 'r') as json_file:
        data = json.load(json_file)
        # First pass: find the plate annotation to establish the scale
        for shape in data['shapes']:
            if (shape['label'] == "plate"):
                len_per_pix, depth_per_pix = get_scale(shape['points'], img, lowest)
                # print(len_per_pix, depth_per_pix)
                break
        # Second pass: accumulate volume per food label
        for shape in data['shapes']:
            label = shape['label']
            if (label == "plate"):
                continue
            points = shape['points']
            volume = cal_volume(points, img, len_per_pix, depth_per_pix, lowest)
            if (label in vol_dict):
                vol_dict[label] += volume
            else:
                vol_dict[label] = volume
    return vol_dict


img = cv2.imread("out.png", 0)
print(get_volume(img, "test.json"))
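# Worked example of the per-pixel term in cal_volume() (numbers are
# illustrative, not from a real image): with len_per_pix = 0.05 cm/px and
# depth_per_pix = 0.1 cm per grey level, a pixel sitting 10 grey levels
# below `lowest` contributes
#   max(0, 10 * 0.1 - 0.2) * 0.05 * 0.05 = 0.8 * 0.0025 = 0.002 cm^3,
# i.e. a column of height (food depth minus plate_thickness) over a
# 0.05 cm x 0.05 cm pixel footprint.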
#!/usr/bin/env python
"""
A script to create tags for CMT managed packages.
Call from within cmt/ directory
"""
import subprocess
import sys
import os
from optparse import OptionParser

__author__ = '<NAME>'
__email__ = 't<EMAIL>uan [at] colorado.edu'

# Ignore large external packages for now
IGNORES = ['CMT', 'EXTERN', 'GSL', 'MYSQL', 'GEANT', 'CLHEP']

# Extensions for finding src files, must satisfy unix wildcard rules
# (trailing commas keep the single-entry values as tuples, not bare strings)
EXTENSIONS = {'cpp': ('*.[hc]', '*.[hc]xx', '*.[hc]pp', '*.cc', '*.hh'),
              'python': ('*.py',),
              'java': ('*.java',)}

# Ignore these files and dirs, key specifies argument to find
# (e.g. '-iname')
PRUNE = {'iname': ['*_Dict.[hc]*', '*linkdef.h']}


def check_dir():
    """ Are we inside cmt/ """
    if os.path.basename(os.getcwd()) != 'cmt':
        sys.exit('Not inside cmt directory!')


def check_requirements():
    """ Ensure that requirements file exists in cmt dir """
    if not os.path.isfile('requirements'):
        sys.exit('No requirements file!')


def init_use_dict():
    """Returns the initial use_dict which contains the current (cwd)
    package and its path. 'cmt show uses' does not include the package
    itself.
    """
    # Must call os.path.dirname because the cwd should be inside a cmt
    # directory
    return {'this': os.path.dirname(os.getcwd())}


def parse_uses():
    """ Returns a dict of used packages and their root dir paths.
    e.g. {ROOT:/path/to/cmt/installed/ROOT/vXrY}
    """
    check_dir()
    check_requirements()
    proc = subprocess.Popen(['cmt', 'show', 'uses'], stdout=subprocess.PIPE)
    use_dict = init_use_dict()
    for line in iter(proc.stdout.readline, ''):
        tokens = line.split()
        # ignore lines that start with '#'
        if line[0] != '#' and tokens[1] not in IGNORES:
            basepath = tokens[-1].strip('()')
            # highland and psyche do not strictly follow CMT path
            # organization. They have subpackages within a master, so
            # we need to take that into account
            relpath_list = [master for master in tokens[3:-1]]
            relpath_list.extend([tokens[1], tokens[2]])
            use_dict[tokens[1]] = os.path.join(basepath, *relpath_list)
    return use_dict


def get_exts(opts):
    if opts.python:
        return EXTENSIONS['python']
    elif opts.java:
        return EXTENSIONS['java']
    else:
        return EXTENSIONS['cpp']


def build_find_args(exts):
    """ exts is a list of file extensions corresponding to the files we
    want to search. This will return a list of arguments that can be
    passed to `find`
    """
    find_args = []
    for a_ext in exts:
        # -o for "or"
        find_args.extend(['-o', '-iname'])
        find_args.append('{0}'.format(a_ext))
    # replace first '-o' with '(' for grouping matches
    find_args[0] = '('
    # append parens for grouping negation
    find_args.extend([')', '('])
    # Add prune files
    for match_type in PRUNE:
        for aprune in PRUNE[match_type]:
            find_args.append('-not')
            find_args.append('-' + match_type)
            find_args.append('{0}'.format(aprune))
    find_args.append(')')
    return find_args


def build_find_cmd(opts, paths):
    """ Builds the cmd file using ctags. Returns cmd based on the
    following template: 'find {0} -type f {1} | etags -'
    """
    find_args = build_find_args(get_exts(opts))
    return ['find'] + paths + ['-type', 'f'] + find_args


def build_tags_cmd():
    return ['etags', '-']


def main():
    """ Uses ctags to generate TAGS file in cmt directory based on
    cmt show uses
    """
    parser = OptionParser()
    parser.add_option('--cpp', dest='cpp', action='store_true',
                      default=False, help='tag only c/cpp files (default)')
    parser.add_option('--python', dest='python', action='store_true',
                      default=False, help='tag only python files')
    parser.add_option('--java', dest='java', action='store_true',
                      default=False, help='tag only java files')
    parser.add_option('-n', dest='dry_run', action='store_true',
                      default=False, help='dry run')
    (opts, args) = parser.parse_args()

    # get the cmt show uses dictionary of programs and paths
    use_dict = parse_uses()

    # build the commands
    find_cmd = build_find_cmd(opts, list(use_dict.itervalues()))
    tags_cmd = build_tags_cmd()

    print 'Creating TAGS file based on dependencies:'
    print use_dict

    if not opts.dry_run:
        find_proc = subprocess.Popen(find_cmd, stdout=subprocess.PIPE)
        tags_proc = subprocess.Popen(tags_cmd, stdin=find_proc.stdout)
        tags_proc.communicate()


if __name__ == '__main__':
    main()
They have subpackages within a", "*relpath_list) return use_dict def get_exts(opts): if opts.python: return EXTENSIONS['python'] elif", "for aprune in PRUNE[match_type]: find_args.append('-not') find_args.append('-'+match_type) find_args.append('{0}'.format(aprune)) find_args.append(')') return find_args", "for \"or\" find_args.extend(['-o', '-iname']) find_args.append('{0}'.format(a_ext)) # replace first '-o' with", "= 't<EMAIL>uan [at] colorado.edu' # Ignore large external packages for", "'show', 'uses'], stdout=subprocess.PIPE) use_dict = init_use_dict() for line in iter(proc.stdout.readline,", "lines that start with '#' if line[0] != '#' and", "CMT managed packages. Call from within cmt/ directory \"\"\" import", "aprune in PRUNE[match_type]: find_args.append('-not') find_args.append('-'+match_type) find_args.append('{0}'.format(aprune)) find_args.append(')') return find_args def", "= build_find_args(get_exts(opts)) return ['find']+paths+['-type', 'f']+find_args def build_tags_cmd(): return ['etags', '-']", "the following template: 'find {0} -type f {1} | etags", "'-'] def main(): \"\"\" Uses ctags to generate TAGS file", "we want to search. This will return a list of", "directory \"\"\" import subprocess import sys import os from optparse", "use_dict which contains the current (cwd) package and its path.", "find_args = build_find_args(get_exts(opts)) return ['find']+paths+['-type', 'f']+find_args def build_tags_cmd(): return ['etags',", "tokens[2]]) use_dict[tokens[1]] = os.path.join(basepath, *relpath_list) return use_dict def get_exts(opts): if", "cmt/ directory \"\"\" import subprocess import sys import os from", "master, so # we need to take that into account", "os.path.join(basepath, *relpath_list) return use_dict def get_exts(opts): if opts.python: return EXTENSIONS['python']", "unix wildcard rules EXTENSIONS = {'cpp': ('*.[hc]', '*.[hc]xx', '*.[hc]pp', '*.cc',", "Ensure that requirements file exists in cmt dir \"\"\" if", "be inside a cmt # directory return {'this':os.path.dirname(os.getcwd())} def parse_uses():", "on dependencies:' print use_dict if not opts.dry_run: find_proc = subprocess.Popen(find_cmd,", "PRUNE: for aprune in PRUNE[match_type]: find_args.append('-not') find_args.append('-'+match_type) find_args.append('{0}'.format(aprune)) find_args.append(')') return", "sys.exit('Not inside cmt directory!') def check_requirements(): \"\"\" Ensure that requirements", "rules EXTENSIONS = {'cpp': ('*.[hc]', '*.[hc]xx', '*.[hc]pp', '*.cc', '*.hh'), 'python':('*.py'),", "dest='dry_run', action='store_true', default=False, help='dry run') (opts, args) = parser.parse_args() #", "parser.add_option('-n', dest='dry_run', action='store_true', default=False, help='dry run') (opts, args) = parser.parse_args()", "generate TAGS file in cmt directory based on cmt show", "parser.parse_args() # get the cmt show uses dictionary of programs", "arguments that can be passed to `find` \"\"\" find_args =", "extensions corresponding to the files we want to search. This", "opts.java: return EXTENSIONS['java'] else: return EXTENSIONS['cpp'] def build_find_args(exts): \"\"\" ext", "= [] for a_ext in exts: # -o for \"or\"", "import os from optparse import OptionParser __author__ = '<NAME>' __email__", "import OptionParser __author__ = '<NAME>' __email__ = 't<EMAIL>uan [at] colorado.edu'", "'cmt show uses' does not include the package itself. 
\"\"\"", "\"\"\" Ensure that requirements file exists in cmt dir \"\"\"", "find_args.append(')') return find_args def build_find_cmd(opts, paths): \"\"\" Builds teh cmd", "stdout=subprocess.PIPE) tags_proc = subprocess.Popen(tags_cmd, stdin=find_proc.stdout) tags_proc.communicate() if __name__ == '__main__':", "__email__ = 't<EMAIL>uan [at] colorado.edu' # Ignore large external packages", "from within cmt/ directory \"\"\" import subprocess import sys import", "optparse import OptionParser __author__ = '<NAME>' __email__ = 't<EMAIL>uan [at]", "help='tag only c/cpp files (default)') parser.add_option('--python', dest='python', action='store_true', default=False, help='tag", "def check_dir(): \"\"\" Are we inside cmt/ \"\"\" if os.path.basename(os.getcwd())", "use_dict = init_use_dict() for line in iter(proc.stdout.readline, ''): tokens =", "file using ctags. Returns cmd based on the following template:", "not in IGNORES: basepath = tokens[-1].strip('()') # highland and psyche", "that start with '#' if line[0] != '#' and tokens[1]", "passed to `find` \"\"\" find_args = [] for a_ext in", "relpath_list.extend([tokens[1], tokens[2]]) use_dict[tokens[1]] = os.path.join(basepath, *relpath_list) return use_dict def get_exts(opts):", "build_find_args(exts): \"\"\" ext is a list of file extensions corresponding", "# Must call os.path.dirname because the cwd should be inside", "file!') def init_use_dict(): \"\"\"Returns the initial use_dict which contains the", "parens for grouping negation find_args.extend([')', '(']) # Add prune files", "init_use_dict(): \"\"\"Returns the initial use_dict which contains the current (cwd)", "Returns a dict of used packages and their root dir", "of programs and paths use_dict = parse_uses() # build the", "have subpackages within a master, so # we need to", "inside cmt directory!') def check_requirements(): \"\"\" Ensure that requirements file", "\"\"\" find_args = [] for a_ext in exts: # -o", "because the cwd should be inside a cmt # directory", "import subprocess import sys import os from optparse import OptionParser", "find_args = [] for a_ext in exts: # -o for", "= [master for master in tokens[3:-1]] relpath_list.extend([tokens[1], tokens[2]]) use_dict[tokens[1]] =", "a master, so # we need to take that into", "the cmt show uses dictionary of programs and paths use_dict", "psyche do not strictly follow CMT path # organization. They", "not os.path.isfile('requirements'): sys.exit('No requirements file!') def init_use_dict(): \"\"\"Returns the initial", "'*.[hc]pp', '*.cc', '*.hh'), 'python':('*.py'), 'java':('*.java')} # Ignore these files and", "script to create tags for CMT managed packages. 
Call from", "return EXTENSIONS['python'] elif opts.java: return EXTENSIONS['java'] else: return EXTENSIONS['cpp'] def", "return EXTENSIONS['java'] else: return EXTENSIONS['cpp'] def build_find_args(exts): \"\"\" ext is", "dictionary of programs and paths use_dict = parse_uses() # build", "\"\"\" if not os.path.isfile('requirements'): sys.exit('No requirements file!') def init_use_dict(): \"\"\"Returns", "packages for now IGNORES = ['CMT', 'EXTERN', 'GSL', 'MYSQL', 'GEANT',", "directory return {'this':os.path.dirname(os.getcwd())} def parse_uses(): \"\"\" Returns a dict of", "tags_proc = subprocess.Popen(tags_cmd, stdin=find_proc.stdout) tags_proc.communicate() if __name__ == '__main__': main()", "find_args[0] = '(' # append parens for grouping negation find_args.extend([')',", "in cmt dir \"\"\" if not os.path.isfile('requirements'): sys.exit('No requirements file!')", "that can be passed to `find` \"\"\" find_args = []", "= init_use_dict() for line in iter(proc.stdout.readline, ''): tokens = line.split()", "action='store_true', default=False, help='dry run') (opts, args) = parser.parse_args() # get", "use_dict def get_exts(opts): if opts.python: return EXTENSIONS['python'] elif opts.java: return", "# -o for \"or\" find_args.extend(['-o', '-iname']) find_args.append('{0}'.format(a_ext)) # replace first", "{1} | etags -' \"\"\" find_args = build_find_args(get_exts(opts)) return ['find']+paths+['-type',", "cmt dir \"\"\" if not os.path.isfile('requirements'): sys.exit('No requirements file!') def", "we need to take that into account relpath_list = [master", "into account relpath_list = [master for master in tokens[3:-1]] relpath_list.extend([tokens[1],", "of arguments that can be passed to `find` \"\"\" find_args", "the current (cwd) package and its path. 'cmt show uses'", "its path. 'cmt show uses' does not include the package", "{ROOT:/path/to/cmt/installed/ROOT/vXrY} \"\"\" check_dir() check_requirements() proc = subprocess.Popen(['cmt', 'show', 'uses'], stdout=subprocess.PIPE)", "for now IGNORES = ['CMT', 'EXTERN', 'GSL', 'MYSQL', 'GEANT', 'CLHEP']", "and dirs, key specifies argument to find # (e.g. '-iname')", "check_requirements() proc = subprocess.Popen(['cmt', 'show', 'uses'], stdout=subprocess.PIPE) use_dict = init_use_dict()", "finding src files, must satisfy unix wildcard rules EXTENSIONS =", "on the following template: 'find {0} -type f {1} |", "EXTENSIONS['java'] else: return EXTENSIONS['cpp'] def build_find_args(exts): \"\"\" ext is a", "{0} -type f {1} | etags -' \"\"\" find_args =", "create tags for CMT managed packages. 
Call from within cmt/", "def check_requirements(): \"\"\" Ensure that requirements file exists in cmt", "tokens[3:-1]] relpath_list.extend([tokens[1], tokens[2]]) use_dict[tokens[1]] = os.path.join(basepath, *relpath_list) return use_dict def", "grouping matches find_args[0] = '(' # append parens for grouping", "'*linkdef.h']} def check_dir(): \"\"\" Are we inside cmt/ \"\"\" if", "show uses \"\"\" parser = OptionParser() parser.add_option('--cpp', dest='cpp', action='store_true', default=False,", "check_requirements(): \"\"\" Ensure that requirements file exists in cmt dir", "Extensions for finding src files, must satisfy unix wildcard rules", "in iter(proc.stdout.readline, ''): tokens = line.split() # ignore lines that", "if opts.python: return EXTENSIONS['python'] elif opts.java: return EXTENSIONS['java'] else: return", "in cmt directory based on cmt show uses \"\"\" parser", "dest='cpp', action='store_true', default=False, help='tag only c/cpp files (default)') parser.add_option('--python', dest='python',", "\"\"\" Returns a dict of used packages and their root", "the initial use_dict which contains the current (cwd) package and", "the commands find_cmd = build_find_cmd(opts, list(use_dict.itervalues())) tags_cmd = build_tags_cmd() print", "and tokens[1] not in IGNORES: basepath = tokens[-1].strip('()') # highland", "a list of file extensions corresponding to the files we", "basepath = tokens[-1].strip('()') # highland and psyche do not strictly", "teh cmd file using ctags. Returns cmd based on the", "os.path.isfile('requirements'): sys.exit('No requirements file!') def init_use_dict(): \"\"\"Returns the initial use_dict", "\"\"\" check_dir() check_requirements() proc = subprocess.Popen(['cmt', 'show', 'uses'], stdout=subprocess.PIPE) use_dict", "for master in tokens[3:-1]] relpath_list.extend([tokens[1], tokens[2]]) use_dict[tokens[1]] = os.path.join(basepath, *relpath_list)" ]
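For illustration, a minimal standalone sketch (not part of the script; the
package path is a stand-in) of the command the helpers above assemble for the
default C/C++ extensions. It only prints the equivalent shell pipeline:

# pipeline_sketch.py -- hypothetical demo mirroring build_find_args()
EXTS = ('*.[hc]', '*.[hc]xx', '*.[hc]pp', '*.cc', '*.hh')
PRUNE_PATTERNS = {'iname': ['*_Dict.[hc]*', '*linkdef.h']}

def sketch_find_args(exts, prune):
    args = []
    for ext in exts:
        args.extend(['-o', '-iname', ext])
    args[0] = '('                      # first '-o' becomes the opening paren
    args.extend([')', '('])            # close matches, open the negation group
    for match_type, patterns in prune.items():
        for pattern in patterns:
            args.extend(['-not', '-' + match_type, pattern])
    args.append(')')
    return args

cmd = ['find', '/path/to/pkg', '-type', 'f'] + sketch_find_args(EXTS, PRUNE_PATTERNS)
print(' '.join(cmd) + ' | etags -')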
[ "= fs_.symlink_list self._file_envs = fs_.envs def __verify_minion_publish(self, load): ''' Verify", "needs to be written to again. Windows enforces this. os.chmod(keyfile,", "= parts[1] except IndexError: log.critical( 'Unable to extract external pillar", "False if not salt.utils.verify.valid_id(self.opts, load['id']): return False file_recv_max_size = 1024*1024", "return True def check_autoreject(self, keyid): ''' Checks if the specified", "fun = load.pop('fun') tag = tagify(jid, prefix='wheel') data = {'fun':", "The configuration on the master allows minions to be matched", "{'fun': load['fun'], 'arg': load['arg'], 'id': load['id'], 'doc': False, 'conf_file': self.opts['conf_file']}", "os.path.isfile(cpath) and load['loc'] != 0: mode = 'ab' else: mode", "tagify from salt.exceptions import SaltMasterError # Import 3rd-party libs import", "perms.update(self.opts['peer_run'][match]) good = False for perm in perms: if re.match(perm,", "the publication payload pub_load = { 'fun': load['fun'], 'arg': load['arg'],", "as exc: log.error(exc) log.error('Exception occurred while ' 'introspecting {0}: {1}'.format(fun,", "except KeyError: log.error( 'Failed to determine groups for user {0}.", "in load: if 'ret_config' in load['kwargs']: pub_load['ret_config'] = load['kwargs'].get('ret_config') if", "if expire_minutes > 0: min_time = time.time() - (60 *", "= '{0}.save_load'.format(self.opts['ext_job_cache']) self.mminion.returners[fstr](load['jid'], load) except KeyError: log.critical( 'The specified returner", "load is valid if 'peer' not in self.opts: return False", "compound function else: funs_to_check = load['fun'] for fun in funs_to_check:", "'fun': load['fun'], 'arg': load['arg'], 'tgt': load['tgt'], 'jid': load['jid'], 'ret': load['ret'],", "therefore not # chown the key file pass keys[user] =", "True else: if stat.S_IWOTH & fmode.st_mode: # don't allow others", "file_roots = {} envs = self._file_envs() for saltenv in envs:", "this method executes minion restrictions so that the minion publication", "or other if not (stat.S_IWGRP & fmode.st_mode or stat.S_IWOTH &", "= salt.key.Key(self.opts) keyapi.delete_key(load['id'], preserve_minions=load.get('preserve_minion_cache', False)) return True class LocalFuncs(object): '''", "'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) good = self.ckminions.wheel_check( self.opts['external_auth'][load['eauth']][name] if", "check group flags if self.opts.get('permissive_pki_access', False) and stat.S_IWGRP & fmode.st_mode:", "to access the external job cache self.mminion = salt.minion.MasterMinion( self.opts,", "= token['name'] log.debug('Minion tokenized user = \"{0}\"'.format(load['user'])) elif 'eauth' in", "prefix='wheel') data = {'fun': \"wheel.{0}\".format(fun), 'jid': jid, 'tag': tag, 'user':", "# touching this stuff, we can probably do what you", "fmode.st_uid == uid or fmode.st_gid != gid: return True elif", "have it saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[saveload_fstr](load['jid'], load) log.info('Got return from", "the runner system: {0}'.format(exc) ) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) def", "users = [] keys = {} acl_users = set(opts['client_acl'].keys()) if", "= os.path.join( self.opts['cachedir'], 'minions', load['id'], 'files', normpath) cdir = os.path.dirname(cpath)", "'arg': [['cat', '/proc/cpuinfo'], [], ['foo']] load['fun'] = load['fun'].split(',') arg_ =", "salt.utils.fopen(tmpfname, 
'w+b') as fp_: fp_.write( self.serial.dumps( {'grains': load['grains'], 'pillar': data})", "False) \\ and fmode.st_gid in groups: return True else: if", "Send a master control function back to the runner system", "pub_load = { 'fun': load['fun'], 'arg': load['arg'], 'tgt': load['tgt'], 'jid':", "wheel ops pass through eauth if 'token' in load: try:", "minion foo.example.com to execute commands from the test module '''", "''' Check a keyid for membership in a signing file", "# After we've ascertained we're not on windows try: user", "load.get('tgt_type', 'glob') ) # If we order masters (via a", "self.opts['user'] pwnam = pwd.getpwnam(user) uid = pwnam[2] gid = pwnam[3]", "False) or self.opts.get('enforce_mine_cache', False): cdir = os.path.join(self.opts['cachedir'], 'minions', load['id']) if", "load['id'], } if 'tgt_type' in load: if load['tgt_type'].startswith('node'): if load['tgt']", "occurred.') return '' return self.loadauth.mk_token(load) except Exception as exc: log.error(", "sure we don't step on anyone else's toes del good", "try: os.makedirs(cdir) except os.error: pass if os.path.isfile(cpath) and load['loc'] !=", "for ' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if", "'contact your local administrator if you believe this is in", "load: log.info( 'User {user} Published command {fun} with jid {jid}'.format(", "that the load is valid if 'peer' not in self.opts:", "don't short circuit if no minions # are found if", "'expr_form': load.get('tgt_type', 'glob'), 'tgt': load['tgt'], 'ret': load['ret'], 'id': load['id'], }", "good = self.ckminions.wheel_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'],", "= self.ckminions.auth_check( perms, load['fun'], load['tgt'], load.get('tgt_type', 'glob'), publish_validate=True) if not", "if 'kwargs' in load: if 'ret_config' in load['kwargs']: pub_load['ret_config'] =", "minions' ) raise SaltMasterError('No fileserver backends available') fileserver.update() except Exception", "ignoring content' log.warn(message.format(signing_file)) return False with salt.utils.fopen(signing_file, 'r') as fp_:", "= fdata except Exception: continue return ret def _mine(self, load,", "make sure double backslashes are normalized normpath = normpath.replace('\\\\', '/')", "= load['fun'].split(',') arg_ = [] for arg in load['arg']: arg_.append(arg.split())", "minion_pub(self, load): ''' Publish a command initiated from a minion,", "is not who it says it is!'.format( load['id'] ) )", "not ((name in self.opts['external_auth'][extra['eauth']]) | ('*' in self.opts['external_auth'][extra['eauth']])): log.warning( 'Authentication", "''' Create and return an authentication token, the clear load", "('id', 'tgt', 'fun')): return {} if 'mine_get' in self.opts: #", "exc: log.error('Exception occurred while ' 'introspecting {0}: {1}'.format(fun, exc)) data['return']", "'Authentication failure of type \"eauth\" occurred.' 
) return '' if", "on the master event interface ''' if 'id' not in", "eauth provider: {0}').format( self.opts['external_auth']) return '' good = self.ckminions.auth_check( self.opts['external_auth'][token['eauth']][token['name']]", "'data' not in load): return False if 'events' in load:", "os.path.getmtime(stub_file) if mtime < min_time: log.warn('Autosign keyid expired {0}'.format(stub_file)) os.remove(stub_file)", "{'fun': \"wheel.{0}\".format(fun), 'jid': jid, 'tag': tag, 'user': token['name']} try: self.event.fire_event(data,", "except KeyError: log.critical( 'The specified returner used for the master", ") ) pub_load['user'] = load['user'] else: log.info( 'Published command {fun}", "opts ) ) return pillargitfs def clean_fsbackend(opts): ''' Clean out", "access to the master ''' def __init__(self, opts): self.opts =", "functions The config will look like this: peer: .*: -", "= True except ImportError: # pwd is not available on", "fs_.serve_file self._file_hash = fs_.file_hash self._file_list = fs_.file_list self._file_list_emptydirs = fs_.file_list_emptydirs", "not in load for key in ('return', 'jid', 'id')): return", "commands from the test module ''' if not self.__verify_minion_publish(load): return", "if not good: msg = ('Authentication failure of type \"eauth\"", "'data': data} except Exception as exc: log.error(exc) log.error('Exception occurred while", "token, the clear load needs to contain the eauth key", "import absolute_import # Import python libs import fnmatch import logging", "occurred.' ) return '' load['user'] = token['name'] log.debug('Minion tokenized user", "a stack trace:\\n', exc_info=True ) # Altering the contents of", "salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False) self.serial = salt.payload.Serial(opts) self.ckminions", "False: log.error( '{user} does not have permissions to run {function}.", "If the command will make a recursive publish don't run", "{ 'fun': load['fun'], 'arg': load['arg'], 'tgt': load['tgt'], 'jid': load['jid'], 'ret':", "' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if load['eauth']", "__verify_minion_publish(self, load): ''' Verify that the passed information authorized a", "\"eauth\" occurred.' ) return '' try: name = self.loadauth.load_name(extra) if", "in ('return', 'jid', 'id')): return None # if we have", "key in ('fun', 'arg', 'id')): return {} perms = set()", "else self.opts['external_auth'][token['eauth']]['*'], load['fun'], load['tgt'], load.get('tgt_type', 'glob')) if not good: #", "command {fun} with jid {jid}'.format( **load ) ) pub_load['user'] =", "since we don't have it saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[saveload_fstr](load['jid'], load)", "load, skip_verify=False): ''' Return the results from an external node", "If master side acl defined. 
if not isinstance(self.opts['mine_get'], dict): return", "a specific function from its own mine ''' if 'id'", "'w+b') as fp_: fp_.write( self.serial.dumps( {'grains': load['grains'], 'pillar': data}) )", "load or 'data' not in load): return False if 'events'", "rend=False, ) # If the master job cache has a", "''' # Verify the load if any(key not in load", "access returner data self.mminion = salt.minion.MasterMinion( self.opts, states=False, rend=False) #", "threw a stack trace:\\n', exc_info=True ) # always write out", "does not have a save_load function!'.format( self.opts['master_job_cache'] ) ) except", "or '../' in load['path']: # Can overwrite master files!! return", "False os.remove(stub_file) return True def check_autoreject(self, keyid): ''' Checks if", "isinstance(load['fun'], str): funs_to_check = [load['fun']] # if this a compound", "[])]: if 'git' in opts_dict: try: import git except ImportError:", "os.path.join(cdir, 'mine.p') if not load.get('clear', False): if os.path.isfile(datap): with salt.utils.fopen(datap,", "load['id']): if isinstance(self.opts['mine_get'][match], list): perms.update(self.opts['mine_get'][match]) if not any(re.match(perm, load['fun']) for", "return False if not self.opts['file_recv'] or os.path.isabs(load['path']): return False if", "permissions for this minion perms = [] for match in", "only. # Write access is necessary since on subsequent runs,", "ret = {} ret['jid'] = self.local.cmd_async(**pub_load) ret['minions'] = self.ckminions.check_minions( load['tgt'],", "message=msg)) good = self.ckminions.wheel_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else", "# the clear: # publish (The publish from the LocalClient)", "git except ImportError: return pillargitfs parts = opts_dict['git'].strip().split() try: br", "to minions' ) raise SaltMasterError('No fileserver backends available') fileserver.update() except", "all commands. peer: foo.example.com: - test.* This configuration will only", "and return an authentication token, the clear load needs to", "minions = self.ckminions.check_minions( load['tgt'], load.get('tgt_type', 'glob') ) # If we", "fileserver backends ''' # Clear remote fileserver backend caches so", "envs = self._file_envs() for saltenv in envs: if saltenv not", "in load: msg = ('Authentication failure of type \"eauth\" occurred", "good: # Accept find_job so the CLI will function cleanly", "= salt.runner.Runner(opts) return runner.run() def pub_ret(self, load, skip_verify=False): ''' Request", "preserve_minions=load.get('preserve_minion_cache', False)) return True class LocalFuncs(object): ''' Set up methods", "{fun} with jid {jid}'.format( **load ) ) log.debug('Published command details", "self.mminion.returners[saveload_fstr](load['jid'], load) log.info('Got return from {id} for job {jid}'.format(**load)) self.event.fire_event(load,", "pub_load['metadata'] = load['kwargs'].get('metadata') if 'user' in load: log.info( 'User {user}", "token_data.get('expire', 0) < time.time(): try: os.remove(token_path) except (IOError, OSError): pass", "a keyid for membership in a signing file ''' if", "load.get('tgt_type', 'glob')) if not good: # Accept find_job so the", "matched to salt functions, so the minions can only publish", "like returns from individual minions. 
''' # Verify the load", "# Generate EndTime endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid()) # If the return", "self.opts['ext_job_cache'] ) ) except Exception: log.critical( 'The specified returner threw", "if load.pop('key') != self.key.get(load['user']): log.warning( 'Authentication failure of type \"user\"", "ret[minion['id']] = data else: ret[minion['id']] = minion['return'] if 'jid' in", "= os.path.join(autosign_dir, keyid) if not os.path.exists(stub_file): return False os.remove(stub_file) return", ") class AutoKey(object): ''' Implement the methods to run auto", "the specified filename has correct permissions ''' if salt.utils.is_windows(): return", "'peer' not in self.opts: return False if not isinstance(self.opts['peer'], dict):", "if not any(re.match(perm, load['fun']) for perm in perms): return {}", "= os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): os.makedirs(cdir) datap =", "set up a master server, this involves preparing the three", "if 'load' in load: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load['load']) #", "the clear load needs to contain the eauth key and", "occurred while authenticating: {0}'.format(exc) ) return '' good = self.ckminions.auth_check(", "'Exception occurred while authenticating: {0}'.format(exc) ) return '' def get_token(self,", "is enabled in the config. The configuration on the master", "'load' in load: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load['load']) # Format", "clear: # publish (The publish from the LocalClient) # _auth", "= salt.wheel.Wheel(opts) def runner(self, load): ''' Send a master control", "False datap = os.path.join(cdir, 'mine.p') if os.path.isfile(datap): try: os.remove(datap) except", "files to the master, files are sent to the master", "('jid', 'id')): return {} else: auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth')", "pub_load['user'] = load['user'] else: log.info( 'Published command {fun} with jid", "wheel system: {0}'.format(exc) ) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) def mk_token(self,", "'jid', 'id')): return None # if we have a load,", "a minion to execute ''' # Verify that the load", "normpath = load['path'] if ':' in normpath: # make sure", "load: log.warning('Authentication failure of type \"eauth\" occurred.') return '' if", "('*' in self.opts['external_auth'][load['eauth']])): log.warning('Authentication failure of type \"eauth\" occurred.') return", "keyapi = salt.key.Key(self.opts) keyapi.delete_key(load['id'], preserve_minions=load.get('preserve_minion_cache', False)) return True class LocalFuncs(object):", "'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if load['eauth'] not in self.opts['external_auth']:", "for opts_dict in [x for x in opts.get('ext_pillar', [])]: if", "return False if load['jid'] == 'req': # The minion is", "acl defined. 
if not isinstance(self.opts['mine_get'], dict): return {} perms =", "self.serial.load(fp_).get(load['fun']) if fdata: ret[minion] = fdata except Exception: continue return", "os.remove(token_path) except (IOError, OSError): pass def clean_pub_auth(opts): try: auth_cache =", "{0}'.format(exc) ) return '' def get_token(self, load): ''' Return the", "'tgt': load['tgt'], 'jid': load['jid'], 'ret': load['ret'], } if 'id' in", "= load.get('kwargs', {}) # check blacklist/whitelist good = True #", "- time.time() > opts['keep_jobs']: os.remove(auth_file_path) except (IOError, OSError): log.error('Unable to", "'eauth' not in load: msg = ('Authentication failure of type", "runner = salt.runner.Runner(opts) return runner.run() def pub_ret(self, load, skip_verify=False): '''", "\"user\" occurred.' ) return '' else: if load.pop('key') != self.key[salt.utils.get_user()]:", "return dict(error=dict(name='TokenAuthenticationError', message=msg)) if not token: msg = 'Authentication failure", "'Authentication failure of type \"user\" occurred.' ) return '' else:", "in ('id', 'tgt', 'fun')): return {} if 'mine_get' in self.opts:", "Implement the methods to run auto key acceptance and rejection", "\\ Token could not be retrieved.') return '' if token['eauth']", "a syndic), don't short circuit if no minions # are", "mminion.returners: mminion.returners[fstr]() def access_keys(opts): ''' A key needs to be", "stuff, we can probably do what you want to do", "= {} acl_users = set(opts['client_acl'].keys()) if opts.get('user'): acl_users.add(opts['user']) acl_users.add(salt.utils.get_user()) if", "{} perms = set() for match in self.opts['mine_get']: if re.match(match,", "load['user'].startswith('sudo_'): # If someone can sudo, allow them to act", "occurred.' ) return '' try: name = self.loadauth.load_name(extra) if not", "self.check_signing_file( keyid, self.opts.get('autoreject_file', None) ) def check_autosign(self, keyid): ''' Checks", "os.remove(cache_file) except OSError as exc: log.critical( 'Unable to file_lists cache", "libs import fnmatch import logging import os import re import", "a built fileserver object be passed in ''' try: if", "{0} is not who it says it is!'.format( load['id'] )", "def revoke_auth(self, load): ''' Allow a minion to request revocation", "check_permissions(self, filename): ''' Check if the specified filename has correct", "in ('id', 'path', 'loc')): return False if not self.opts['file_recv'] or", "by the LocalClient. ''' extra = load.get('kwargs', {}) # check", "'to' in load: pub_load['to'] = load['to'] if 'kwargs' in load:", "skip_verify=False): ''' Return the mine data ''' if not skip_verify:", "fun, exc.__class__.__name__, exc, ) self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag':", "don't run if re.match('publish.*', load['fun']): return False # Check the", "the minions ''' # Generate EndTime endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid()) #", "return False if any(key not in load for key in", "fstr = '{0}.save_load'.format(self.opts['ext_job_cache']) self.mminion.returners[fstr](load['jid'], load) except KeyError: log.critical( 'The specified", "= load['tgt_type'] if 'to' in load: pub_load['to'] = load['to'] if", "eauth system is not enabled, fail msg = ('Authentication failure", "load['jid'] == 'req': # The minion is returning a standalone", "'files', normpath) cdir = os.path.dirname(cpath) if not os.path.isdir(cdir): try: os.makedirs(cdir)", "the execution. 
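The job-cache cleanup above shows the loader-dispatch pattern used throughout
this module: maintenance is delegated to whatever
'<master_job_cache>.clean_old_jobs' function the returner loader exposes. A
minimal standalone sketch, with a hypothetical registry in place of the real
loader ('local_cache' is the stock default backend name):

# dispatch_sketch.py -- illustrative only, not Salt code
def _local_cache_clean_old_jobs():
    print('cleaning the local job cache')

returners = {'local_cache.clean_old_jobs': _local_cache_clean_old_jobs}

master_job_cache = 'local_cache'
fstr = '{0}.clean_old_jobs'.format(master_job_cache)
if fstr in returners:
    returners[fstr]()   # only called when the backend provides the hook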
class AutoKey(object):
    '''
    Implement the methods to run auto key acceptance and rejection
    '''
    def __init__(self, opts):
        self.opts = opts

    def check_permissions(self, filename):
        '''
        Check if the specified filename has correct permissions
        '''
        if salt.utils.is_windows():
            return True

        # After we've ascertained we're not on windows
        try:
            user = self.opts['user']
            pwnam = pwd.getpwnam(user)
            uid = pwnam[2]
            gid = pwnam[3]
            groups = salt.utils.get_gid_list(user, include_default=False)
        except KeyError:
            log.error(
                'Failed to determine groups for user {0}. The user is not '
                'available.\n'.format(
                    user
                )
            )
            return False

        fmode = os.stat(filename)

        if os.getuid() == 0:
            if fmode.st_uid == uid or fmode.st_gid != gid:
                return True
            elif self.opts.get('permissive_pki_access', False) \
                    and fmode.st_gid in groups:
                return True
        else:
            if stat.S_IWOTH & fmode.st_mode:
                # don't allow others to write to the file
                return False

            # check group flags
            if self.opts.get('permissive_pki_access', False) \
                    and stat.S_IWGRP & fmode.st_mode:
                return True
            elif stat.S_IWGRP & fmode.st_mode:
                return False

            # check if writable by group or other
            if not (stat.S_IWGRP & fmode.st_mode or
                    stat.S_IWOTH & fmode.st_mode):
                return True

        return False

    def check_signing_file(self, keyid, signing_file):
        '''
        Check a keyid for membership in a signing file
        '''
        if not signing_file or not os.path.exists(signing_file):
            return False

        if not self.check_permissions(signing_file):
            message = 'Wrong permissions for {0}, ignoring content'
            log.warn(message.format(signing_file))
            return False

        with salt.utils.fopen(signing_file, 'r') as fp_:
            for line in fp_:
                line = line.strip()
                if line.startswith('#'):
                    continue
                else:
                    if salt.utils.expr_match(keyid, line):
                        return True
        return False

    def check_autosign_dir(self, keyid):
        '''
        Check a keyid for membership in an autosign directory.
        '''
        autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign')

        # cleanup expired files
        expire_minutes = self.opts.get('autosign_timeout', 120)
        if expire_minutes > 0:
            min_time = time.time() - (60 * int(expire_minutes))
            for root, dirs, filenames in os.walk(autosign_dir):
                for f in filenames:
                    stub_file = os.path.join(autosign_dir, f)
                    mtime = os.path.getmtime(stub_file)
                    if mtime < min_time:
                        log.warn('Autosign keyid expired {0}'.format(stub_file))
                        os.remove(stub_file)

        stub_file = os.path.join(autosign_dir, keyid)
        if not os.path.exists(stub_file):
            return False
        os.remove(stub_file)
        return True

    def check_autoreject(self, keyid):
        '''
        Checks if the specified keyid should automatically be rejected.
        '''
        return self.check_signing_file(
            keyid,
            self.opts.get('autoreject_file', None)
        )

    def check_autosign(self, keyid):
        '''
        Checks if the specified keyid should automatically be signed.
        '''
        if self.opts['auto_accept']:
            return True
        if self.check_signing_file(keyid, self.opts.get('autosign_file', None)):
            return True
        if self.check_autosign_dir(keyid):
            return True
        return False
\\ Token does not verify against", "= self.serial.load(fp_).get(load['fun']) if fdata: ret[minion] = fdata except Exception: continue", "user not in users: try: user = pwd.getpwnam(user).pw_name except KeyError:", "minion to delete all of its own mine contents '''", "in load for key in ('id', 'path', 'loc')): return False", "of the routines needed to set up a master server,", "return False return True def _mine_flush(self, load, skip_verify=False): ''' Allow", ") if os.path.exists(keyfile): log.debug('Removing stale keyfile: {0}'.format(keyfile)) os.unlink(keyfile) key =", "load['user'] == self.opts.get('user', 'root'): if load.pop('key') != self.key[self.opts.get('user', 'root')]: log.warning(", "log.warning('Authentication failure of type \"eauth\" occurred.') return '' if load['eauth']", "return runner.run() def pub_ret(self, load, skip_verify=False): ''' Request the return", "load['jid'], 'tgt_type': load['tgt_type'], 'tgt': load['tgt'], 'user': load['user'], 'fun': load['fun'], 'arg':", "pub auth file') def clean_old_jobs(opts): ''' Clean out the old", "from __future__ import absolute_import # Import python libs import fnmatch", "try: file_lists_caches = os.listdir(file_lists_dir) except OSError: continue for file_lists_cache in", "break if good is False: log.error( '{user} does not have", "jid_fn = os.path.join(auth_cache, load['jid']) with salt.utils.fopen(jid_fn, 'r') as fp_: if", "self.tops: if fun not in self.opts.get('master_tops', {}): continue try: ret.update(self.tops[fun](opts=opts,", "the minion to delete all of its own mine contents", "log.error('Exception occurred while ' 'introspecting {0}: {1}'.format(fun, exc)) data['return'] =", "dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) if 'eauth' not in load: msg =", "{'tag': tag, 'data': data} except Exception as exc: log.error( 'Exception", "so they get recreated for backend in ('git', 'hg', 'svn'):", "= line.strip() if line.startswith('#'): continue else: if salt.utils.expr_match(keyid, line): return", "line.startswith('#'): continue else: if salt.utils.expr_match(keyid, line): return True return False", "if not salt.utils.verify.valid_id(self.opts, load['id']): return ret match_type = load.get('expr_form', 'glob')", "occurred. 
\\ Token does not verify against eauth provider: {0}').format(", "event interface ''' if 'id' not in load: return False", "serious implications on the performance of the # publish commands.", "for key in ('return', 'jid', 'id')): return False if load['jid']", "message=str(exc))) except Exception as exc: log.error( 'Exception occurred in the", "additions can have serious implications on the performance of the", ") if HAS_PWD: if user not in users: try: user", "LocalClient) # _auth def __init__(self, opts, key): self.opts = opts", "if 'id' not in load or 'data' not in load:", "and load['loc'] != 0: mode = 'ab' else: mode =", "any(key not in load for key in ('fun', 'arg', 'id')):", "& fmode.st_mode): return True return False def check_signing_file(self, keyid, signing_file):", "in self.local.cmd_iter(**pub_load): if load.get('form', '') == 'full': data = minion", "data = pillar.compile_pillar(pillar_dirs=pillar_dirs) if self.opts.get('minion_data_cache', False): cdir = os.path.join(self.opts['cachedir'], 'minions',", "own mine contents ''' if not skip_verify and 'id' not", "name # Verify that the caller has root on master", "to the file return False # check group flags if", "time.time() - (60 * int(expire_minutes)) for root, dirs, filenames in", "fmode.st_mode): return True return False def check_signing_file(self, keyid, signing_file): '''", "not in token_data or token_data.get('expire', 0) < time.time(): try: os.remove(token_path)", "This is the list of funcs/modules! if isinstance(self.opts['peer'][match], list): perms.extend(self.opts['peer'][match])", "in load: log.info( 'User {user} Published command {fun} with jid", "self.opts['state_events'] mopts['state_aggregate'] = self.opts['state_aggregate'] mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks'] mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']", "publication payload pub_load = { 'fun': load['fun'], 'arg': load['arg'], 'expr_form':", "= set(opts['client_acl'].keys()) if opts.get('user'): acl_users.add(opts['user']) acl_users.add(salt.utils.get_user()) if HAS_PWD: for user", "'glob') ) # If we order masters (via a syndic),", "{ 'jid': load['jid'], 'tgt_type': load['tgt_type'], 'tgt': load['tgt'], 'user': load['user'], 'fun':", "log.warn(msg) return {} if 'timeout' in load: try: pub_load['timeout'] =", "for x in opts.get('ext_pillar', [])]: if 'git' in opts_dict: try:", "False datap = os.path.join(cdir, 'mine.p') if os.path.isfile(datap): try: with salt.utils.fopen(datap,", "this is a regular command, its a single function if", "exc, ) self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data':", "# if we have a load, save it if 'load'", "windows HAS_PWD = False log = logging.getLogger(__name__) # Things to", "from the minions ''' # Generate EndTime endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid())", "listen to it! log.warn( 'Minion id {0} is not who", "if not (name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']]): msg", "os.path.join( self.opts['cachedir'], 'minions', minion, 'mine.p') try: with salt.utils.fopen(mine, 'rb') as", "Make a client self.local = salt.client.get_local_client(mopts=self.opts) # Create the master", "self.opts['peer_run']: if re.match(match, load['id']): # This is the list of", "as root. 
''' users = [] keys = {} acl_users", "or self.opts.get('enforce_mine_cache', False): cdir = os.path.join(self.opts['cachedir'], 'minions', load['id']) if not", "'metadata' in load['kwargs']: pub_load['metadata'] = load['kwargs'].get('metadata') if 'user' in load:", "to make sure we don't step on anyone else's toes", "of its own key ''' if 'id' not in load:", "check it try: token = self.loadauth.get_tok(extra['token']) except Exception as exc:", "load['fun'] != 'saltutil.find_job': log.warning( 'Authentication failure of type \"eauth\" occurred.'", "return ret def minion_publish(self, load): ''' Publish a command initiated", "{0} not present.').format(token['eauth']) return '' if not ((token['name'] in self.opts['external_auth'][token['eauth']])", "configured master_tops interfaces opts = {} grains = {} ret", "return self.loadauth.get_tok(load['token']) def publish(self, load): ''' This method sends out", "load): ''' Receive a syndic minion return and format it", "by group or other if not (stat.S_IWGRP & fmode.st_mode or", "(60 * int(expire_minutes)) for root, dirs, filenames in os.walk(autosign_dir): for", "in load for key in ('id', 'tgt', 'fun')): return {}", "runner_client.async(fun, load.get('kwarg', {}), load.get('username', 'UNKNOWN')) except Exception as exc: log.error('Exception", "not have permissions to run {function}. Please ' 'contact your", "continue for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'): cache_file = os.path.join(file_lists_dir, file_lists_cache)", "the ext pillar caches, used when the master starts '''", "be rejected. ''' return self.check_signing_file( keyid, self.opts.get('autoreject_file', None) ) def", "False): cdir = os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): return", "'job')) self.event.fire_ret_load(load) if not self.opts['job_cache'] or self.opts.get('ext_job_cache'): return fstr =", "# A token was passed, check it try: token =", "the master. ''' from __future__ import absolute_import # Import python", "not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, str(ret['jid'])) with salt.utils.fopen(jid_fn, 'w+')", "good: msg = ('Authentication failure of type \"eauth\" occurred for", "in token_data or token_data.get('expire', 0) < time.time(): try: os.remove(token_path) except", "'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) jid = salt.utils.jid.gen_jid() fun =", "mopts['file_roots'] = file_roots if load.get('env_only'): return mopts mopts['renderer'] = self.opts['renderer']", "'id')): return {} else: auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if", "'minions', load['id']) if not os.path.isdir(cdir): os.makedirs(cdir) datap = os.path.join(cdir, 'data.p')", "# check blacklist/whitelist good = True # Check if the", "type \"token\" occurred.' 
log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) good = self.ckminions.wheel_check(", "token: {0}'.format( exc ) ) return '' if not token:", "# Retrieve the jid if not load['jid']: fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])", "if 'eauth' not in load: log.warning('Authentication failure of type \"eauth\"", "* int(expire_minutes)) for root, dirs, filenames in os.walk(autosign_dir): for f", "sudo, allow them to act as root if load.get('key', 'invalid')", "backend in opts['fileserver_backend']: env_cache = os.path.join( opts['cachedir'], '{0}fs'.format(backend), 'envs.p' )", "for membership in a autosign directory. ''' autosign_dir = os.path.join(self.opts['pki_dir'],", "# to make sure we don't step on anyone else's", "return '' try: name = self.loadauth.load_name(load) if not ((name in", "if not token: log.warning('Authentication failure of type \"token\" occurred. \\", "'tgt': load['tgt'], 'ret': load['ret'], 'id': load['id'], } if 'tmo' in", "to the owner only. # Write access is necessary since", "server interface ''' fs_ = salt.fileserver.Fileserver(self.opts) self._serve_file = fs_.serve_file self._file_hash", "data} except Exception as exc: log.error(exc) log.error('Exception occurred while '", "= True ret = {} for minion in self.local.cmd_iter(**pub_load): if", "Import 3rd-party libs import salt.ext.six as six try: import pwd", "the {0} key for local communication'.format( user ) ) if", "& fmode.st_mode: # don't allow others to write to the", "is not enabled, fail log.warning( 'Authentication failure of type \"eauth\"", "'' elif load['user'] == salt.utils.get_user(): if load.pop('key') != self.key.get(load['user']): log.warning(", "says it is! # We don't want to listen to", "load: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load['load']) # Format individual return", "mode = 'ab' else: mode = 'wb' with salt.utils.fopen(cpath, mode)", "# pwd is not available on windows HAS_PWD = False", "client self.local = salt.client.get_local_client(mopts=self.opts) # Create the master minion to", "This configuration will enable all minions to execute all commands.", "= int(load['timeout']) except ValueError: msg = 'Failed to parse timeout", "salt.utils.jid.gen_jid() fun = load.pop('fun') tag = tagify(jid, prefix='wheel') data =", "subsequent runs, if the file # exists, it needs to", "checker.check_minions( load['tgt'], match_type, greedy=False ) for minion in minions: mine", "# Announce the job on the event bus self.event.fire_event(new_job_load, 'new_job')", ") return '' if not token: log.warning('Authentication failure of type", "execution. ''' if not skip_verify and any(key not in load", "load['fun']): return False # Check the permissions for this minion", "# The eauth system is not enabled, fail msg =", "load): ''' Allows minions to send files to the master,", "Gathers the data from the specified minions' mine ''' if", "{0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) good = self.ckminions.runner_check( self.opts['external_auth'][load['eauth']][name]", "self.opts['external_auth']: msg = 'Authentication failure of type \"token\" occurred.' log.warning(msg)", "function data ''' if 'peer_run' not in self.opts: return {}", "self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} except", "failure of type \"eauth\" occurred.' 
) return '' except Exception", "['foo']] load['fun'] = load['fun'].split(',') arg_ = [] for arg in", "anything happens in the top generation, log it and move", "('*' in self.opts['external_auth'][extra['eauth']])): log.warning( 'Authentication failure of type \"eauth\" occurred.'", "token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun']) if not good: msg", "{1}'.format(fun, exc)) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) if 'eauth' not in", "if load.pop('key') != self.key.get(self.opts.get('user', 'root')): log.warning( 'Authentication failure of type", "can have serious implications on the performance of the #", "KeyError: log.error('ACL user {0} is not available'.format(user)) continue keyfile =", "'error.\\n'.format( user=load['user'], function=load['fun'] ) ) return '' # to make", "tempfile # Import salt libs import salt.crypt import salt.utils import", "with minion/master versions and even tiny # additions can have", "load): ''' Send a master control function back to the", "syndic minion return and format it to look like returns", "True def _master_opts(self, load): ''' Return the master options to", "self.event.fire_event(event['data'], tagify(event['tag'], base=load['pretag'])) else: self.event.fire_event(event, tagify(event['tag'], base=load['pretag'])) else: tag =", "master event interface ''' if 'id' not in load: return", "pub_load['expr_form']) auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache)", "skip_verify=False): ''' Gathers the data from the specified minions' mine", "''' if not signing_file or not os.path.exists(signing_file): return False if", "occurred.' ) return '' except Exception as exc: log.error( 'Exception", "mtime = os.path.getmtime(stub_file) if mtime < min_time: log.warn('Autosign keyid expired", "and move on log.error( 'Top function {0} failed with error", "salt.minion import salt.search import salt.key import salt.fileserver import salt.utils.atomicfile import", "= salt.minion.MasterMinion( opts, states=False, rend=False, ) # If the master", "except OSError as exc: log.critical( 'Unable to clear env cache", "== 'compound': match_type = 'compound_pillar_exact' checker = salt.utils.minions.CkMinions(self.opts) minions =", "any(key not in load for key in ('return', 'jid', 'id')):", "exc_info_on_loglevel=logging.DEBUG ) class AutoKey(object): ''' Implement the methods to run", "salt functions, so the minions can only publish allowed salt", "Exception as exc: msg = 'Exception occurred when generating auth", "not self.__verify_minion_publish(load): return {} # Set up the publication payload", "{} return self.local.get_cache_returns(load['jid']) def minion_pub(self, load): ''' Publish a command", "needed by the master. 
''' from __future__ import absolute_import #", "invalid, just ignore it if any(key not in load for", "fp_: if load['loc']: fp_.seek(load['loc']) fp_.write(load['data']) return True def _pillar(self, load):", "salt.crypt import salt.utils import salt.client import salt.payload import salt.pillar import", "returner threw a stack trace:\\n', exc_info=True ) # Altering the", "Prepare the runner object opts = {'fun': load['fun'], 'arg': load['arg'],", "'expire' not in token_data or token_data.get('expire', 0) < time.time(): try:", "- (60 * int(expire_minutes)) for root, dirs, filenames in os.walk(autosign_dir):", "present.').format(token['eauth']) return '' if not ((token['name'] in self.opts['external_auth'][token['eauth']]) | ('*'", "as exc: log.error( 'Exception occurred while authenticating: {0}'.format(exc) ) return", "Import salt libs import salt.crypt import salt.utils import salt.client import", "= load['opts']['grains'] for fun in self.tops: if fun not in", "= salt.runner.RunnerClient(self.opts) return runner_client.async( fun, load.get('kwarg', {}), token['name']) except Exception", "'' elif load['user'] == 'root': if load.pop('key') != self.key.get(self.opts.get('user', 'root')):", "class RemoteFuncs(object): ''' Funcitons made available to minions, this class", "root. ''' users = [] keys = {} acl_users =", ") return '' good = self.ckminions.auth_check( self.opts['client_acl'][load['user']], load['fun'], load['tgt'], load.get('tgt_type',", "{} if not salt.utils.verify.valid_id(self.opts, load['id']): return {} # Evaluate all", "The eauth system is not enabled, fail msg = ('Authentication", "load: if load['user'].startswith('sudo_'): # If someone can sudo, allow them", "backends ''' # Clear remote fileserver backend caches so they", "calls if extra.get('token', False): # A token was passed, check", "self.ckminions.wheel_check( self.opts['external_auth'][load['eauth']][name] if name in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun']) if", "' 'contact your local administrator if you believe this is", "exc, ) data['success'] = False self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return", "file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'): cache_file = os.path.join(file_lists_dir, file_lists_cache) try: os.remove(cache_file)", "self.key: # User is authorised, check key and check perms", ".format(cache_file, exc) ) def clean_expired_tokens(opts): ''' Clean expired tokens from", "msg = ('Authentication failure of type \"token\" occurred for '", "self.opts['external_auth'][extra['eauth']]['*'], load['fun'], load['tgt'], load.get('tgt_type', 'glob')) if not good: # Accept", "self.opts['external_auth'][load['eauth']]): msg = ('Authentication failure of type \"eauth\" occurred for", "pub_load['expr_form'] = load['tgt_type'] ret = {} ret['jid'] = self.local.cmd_async(**pub_load) ret['minions']", "in self.opts['client_acl']: log.warning( 'Authentication failure of type \"user\" occurred.' 
)", "type \"eauth\" occurred for ' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return", "while ' 'introspecting {0}: {1}'.format(fun, exc)) data['return'] = 'Exception occurred", "and load['loc'] < 0: log.error('Invalid file pointer: load[loc] < 0')", "= pwnam[3] groups = salt.utils.get_gid_list(user, include_default=False) except KeyError: log.error( 'Failed", "if not os.path.isdir(cdir): os.makedirs(cdir) datap = os.path.join(cdir, 'data.p') tmpfh, tmpfname", "can only publish allowed salt functions The config will look", "try: self.event.fire_event(data, tagify([jid, 'new'], 'wheel')) ret = self.wheel_.call_func(fun, **load) data['return']", "((name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']])): log.warning('Authentication failure of", "not minions: return { 'enc': 'clear', 'load': { 'jid': None,", "Make an minion checker object self.ckminions = salt.utils.minions.CkMinions(opts) # Make", "not isinstance(self.opts['mine_get'], dict): return {} perms = set() for match", "= os.listdir(file_lists_dir) except OSError: continue for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'):", "interface ''' fs_ = salt.fileserver.Fileserver(self.opts) self._serve_file = fs_.serve_file self._file_hash =", "= '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[saveload_fstr](load['jid'], load) log.info('Got return from {id} for job", "configuration on the master allows minions to be matched to", "six.iteritems(self.local.get_cache_returns(ret['__jid__'])): if key not in ret: ret[key] = val if", "salt.client.get_local_client(mopts=self.opts) # Make an minion checker object self.ckminions = salt.utils.minions.CkMinions(opts)", "continue keyfile = os.path.join( opts['cachedir'], '.{0}_key'.format(user) ) if os.path.exists(keyfile): log.debug('Removing", "in a signing file ''' if not signing_file or not", "self.opts['mine_get']: if re.match(match, load['id']): if isinstance(self.opts['mine_get'][match], list): perms.update(self.opts['mine_get'][match]) if not", "not ((name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']])): log.warning('Authentication failure", "occurred.') return '' if not self.loadauth.time_auth(load): log.warning('Authentication failure of type", "funs_to_check = [load['fun']] # if this a compound function else:", "\"{0}\"'.format(load['user'])) elif 'eauth' in extra: if extra['eauth'] not in self.opts['external_auth']:", "except KeyError: log.critical( 'The specified returner used for the external", "opts_dict: try: import git except ImportError: return pillargitfs parts =", "return mopts def _ext_nodes(self, load, skip_verify=False): ''' Return the results", "grains=grains)) except Exception as exc: # If anything happens in", "parse timeout value: {0}'.format( load['tmo']) log.warn(msg) return {} if 'timeout'", "specified returner used for the external job cache ' '\"{0}\"", "'') == 'full': data = minion if 'jid' in minion:", "return dict(error=dict(name='TokenAuthenticationError', message=msg)) good = self.ckminions.runner_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in", "runner system ''' if 'token' in load: try: token =", ") return '' elif load['user'] == salt.utils.get_user(): if load.pop('key') !=", "or 'fun' not in load: return False if self.opts.get('minion_data_cache', False)", "check perms if load.pop('key') != self.key[load['user']]: log.warning( 'Authentication failure of", 
"passed information authorized a minion to execute ''' # Verify", "{ 'jid': None, 'minions': minions } } # Retrieve the", "from individual minions. ''' # Verify the load if any(key", "{2}'.format( fun, exc.__class__.__name__, exc, ) self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return", "'Authentication failure of type \"token\" occurred.' ) return '' load['user']", "key needs to be placed in the filesystem with permissions", "not good: msg = ('Authentication failure of type \"token\" occurred", "= load['opts'] if 'grains' in load['opts']: grains = load['opts']['grains'] for", "except ImportError: # pwd is not available on windows HAS_PWD", "in perms: if re.match(perm, load['fun']): good = True if not", "self.mminion.returners[fstr](load['jid'], load['load']) # Format individual return loads for key, item", "# Stand up the master Minion to access returner data", "invalid ''' if 'token' not in load: return False return", "= extra['id'] if 'tgt_type' in load: pub_load['tgt_type'] = load['tgt_type'] if", "dict): return {} if any(key not in load for key", "automatically be rejected. ''' return self.check_signing_file( keyid, self.opts.get('autoreject_file', None) )", "name = self.loadauth.load_name(load) if not (name in self.opts['external_auth'][load['eauth']]) | ('*'", "the minion access to the master ''' def __init__(self, opts):", "used by the LocalClient. ''' extra = load.get('kwargs', {}) #", "for ' 'user {0}.').format(token['name']) log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) jid =", "= self.loadauth.get_tok(extra['token']) except Exception as exc: log.error( 'Exception occurred when", "message=str(exc))) def mk_token(self, load): ''' Create and return an authentication", "load['id']): # This is the list of funcs/modules! 
if isinstance(self.opts['peer'][match],", "import git_pillar from salt.utils.event import tagify from salt.exceptions import SaltMasterError", "load: return False if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False): cdir", "as fp_: for line in fp_: line = line.strip() if", "if load['eauth'] not in self.opts['external_auth']: # The eauth system is", "authorised, check key and check perms if load.pop('key') != self.key[load['user']]:", "others to write to the file return False # check", "Allow the minion to delete a specific function from its", "# The ClearFuncs object encapsulates the functions that can be", "('id', 'path', 'loc')): return False if not self.opts['file_recv'] or os.path.isabs(load['path']):", "= salt.utils.minions.CkMinions(self.opts) minions = checker.check_minions( load['tgt'], match_type, greedy=False ) for", "self.event.fire_event(event, event['tag']) # old dup event if load.get('pretag') is not", "match_type.lower() == 'compound': match_type = 'compound_pillar_exact' checker = salt.utils.minions.CkMinions(self.opts) minions", "'invalid') == self.key.get('root'): load.pop('key') elif load.pop('key') != self.key[self.opts.get('user', 'root')]: log.warning(", "normpath = os.path.normpath(normpath) cpath = os.path.join( self.opts['cachedir'], 'minions', load['id'], 'files',", "= load['path'] if ':' in normpath: # make sure double", "'loc' in load and load['loc'] < 0: log.error('Invalid file pointer:", "exc: log.error( 'Exception occurred in the runner system: {0}'.format(exc) )", "blacklisted for user_re in self.opts['client_acl_blacklist'].get('users', []): if re.match(user_re, load['user']): good", "log it and move on log.error( 'Top function {0} failed", "If we order masters (via a syndic), don't short circuit", "log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: name = self.loadauth.load_name(load) if not", "load.pop('key') != self.key[load['user']]: log.warning( 'Authentication failure of type \"user\" occurred.'", "import salt.utils.verify import salt.utils.minions import salt.utils.gzip_util import salt.utils.jid from salt.pillar", "specific function from its own mine ''' if 'id' not", "self.key.get(load['user']): log.warning( 'Authentication failure of type \"user\" occurred.' ) return", "chown the key file pass keys[user] = key return keys", "self.opts['client_acl']: log.warning( 'Authentication failure of type \"user\" occurred.' 
) return", "message = 'Wrong permissions for {0}, ignoring content' log.warn(message.format(signing_file)) return", "'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if load['eauth'] not", "if load['user'] in self.key: # User is authorised, check key", "the master will not be able to ' 'serve files", "not os.path.isfile(auth_file_path): continue if os.path.getmtime(auth_file_path) - time.time() > opts['keep_jobs']: os.remove(auth_file_path)", "logging.getLogger(__name__) # Things to do in lower layers: # only", "in load: ret['out'] = load['out'] self._return(ret) def minion_runner(self, load): '''", "''' autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign') # cleanup expired files expire_minutes", "False if load['jid'] == 'req': # The minion is returning", "if load['tgt_type'].startswith('node'): if load['tgt'] in self.opts['nodegroups']: pub_load['tgt'] = self.opts['nodegroups'][load['tgt']] pub_load['expr_form_type']", "the master event interface ''' if 'id' not in load:", "in wheel {0}: {1}: {2}'.format( fun, exc.__class__.__name__, exc, ) data['success']", "from a minion, this method executes minion restrictions so that", "load): ''' Create and return an authentication token, the clear", "'' elif load['user'] == self.opts.get('user', 'root'): if load.pop('key') != self.key[self.opts.get('user',", "salt.key.Key(self.opts) keyapi.delete_key(load['id'], preserve_minions=load.get('preserve_minion_cache', False)) return True class LocalFuncs(object): ''' Set", "= opts def check_permissions(self, filename): ''' Check if the specified", "'mine.p') if not load.get('clear', False): if os.path.isfile(datap): with salt.utils.fopen(datap, 'rb')", "self.opts['external_auth']: # The eauth system is not enabled, fail log.warning('Authentication", "jid, only allowed if the requesting minion also initialted the", "the user is blacklisted for user_re in self.opts['client_acl_blacklist'].get('users', []): if", "runner.run() def pub_ret(self, load, skip_verify=False): ''' Request the return data", "return '' good = self.ckminions.auth_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']]", "'jid': load['jid'], 'ret': load['ret'], } if 'id' in extra: pub_load['id']", "publish_validate=True) if not good: return False return True def _master_opts(self,", "automatically be signed. 
''' if self.opts['auto_accept']: return True if self.check_signing_file(keyid,", "self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False) # Make", "not os.path.isdir(cdir): os.makedirs(cdir) datap = os.path.join(cdir, 'mine.p') if not load.get('clear',", "or stat.S_IWOTH & fmode.st_mode): return True return False def check_signing_file(self,", "file server update'.format(exc), exc_info_on_loglevel=logging.DEBUG ) class AutoKey(object): ''' Implement the", "mopts = {} file_roots = {} envs = self._file_envs() for", "if HAS_PWD: try: os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1) except OSError: # The", "'path', 'loc')): return False if not self.opts['file_recv'] or os.path.isabs(load['path']): return", "occurred for ' 'user {0}.').format(token['name']) log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) jid", "load, skip_verify=False): ''' Return the mine data ''' if not", "cache file {0}: {1}' .format(cache_file, exc) ) def clean_expired_tokens(opts): '''", "if not ((name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']])): msg", "of {0} not present.').format(token['eauth']) return '' if not ((token['name'] in", "token_file: token_data = serializer.loads(token_file.read()) if 'expire' not in token_data or", "if the file # exists, it needs to be written", "fun, exc, load['id'] ) ) return ret def _mine_get(self, load,", ") except Exception: log.critical( 'The specified returner threw a stack", "with permissions 0400 so clients are required to run as", "needs to be placed in the filesystem with permissions 0400", "This module contains all of the routines needed to set", "load if any(key not in load for key in ('return',", "self.mminion.returners[fstr](load['jid'], endtime) fstr = '{0}.returner'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load) def _syndic_return(self, load): '''", "the master job cache ' '\"{0}\" does not have a", ") ) return '' # to make sure we don't", "return {} # Set up the publication payload pub_load =", "if load.get('key', 'invalid') == self.key.get('root'): load.pop('key') elif load.pop('key') != self.key[self.opts.get('user',", "enforces this. os.chmod(keyfile, 0o600) if HAS_PWD: try: os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1)", "type \"other\" occurred.' ) return '' # Retrieve the minions", "= os.path.join( opts['cachedir'], '.{0}_key'.format(user) ) if os.path.exists(keyfile): log.debug('Removing stale keyfile:", "Check a keyid for membership in a signing file '''", "from the job cache ''' # TODO: better way to", "self.check_permissions(signing_file): message = 'Wrong permissions for {0}, ignoring content' log.warn(message.format(signing_file))", "self.opts, states=False, rend=False) self.__setup_fileserver() def __setup_fileserver(self): ''' Set the local", "if no minions # are found if not self.opts.get('order_masters'): #", "not skip_verify and 'id' not in load: return False if", "self.event.fire_ret_load(load) if not self.opts['job_cache'] or self.opts.get('ext_job_cache'): return fstr = '{0}.update_endtime'.format(self.opts['master_job_cache'])", "so clients are required to run as root. ''' users", "for {0}, ignoring content' log.warn(message.format(signing_file)) return False with salt.utils.fopen(signing_file, 'r')", "skip_verify: if 'id' not in load or 'data' not in", "minion is not who it says it is! 
# We", "= pillar.compile_pillar(pillar_dirs=pillar_dirs) if self.opts.get('minion_data_cache', False): cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])", "for ' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) good", "log.error( 'Exception occurred when generating auth token: {0}'.format( exc )", "owner only. # Write access is necessary since on subsequent", "dict(error=dict(name='EauthAuthenticationError', message=msg)) jid = salt.utils.jid.gen_jid() fun = load.pop('fun') tag =", "raw routines post validation that make up the minion access", "master ''' def __init__(self, opts): self.opts = opts self.event =", "# Accept find_job so the CLI will function cleanly if", "' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: name", "return self.loadauth.mk_token(load) except Exception as exc: log.error( 'Exception occurred while", "'id')): return False # If the command will make a", "Create the event manager self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'],", "| ('*' in self.opts['external_auth'][token['eauth']])): log.warning('Authentication failure of type \"token\" occurred.", "self.opts.get('ext_job_cache'): return fstr = '{0}.update_endtime'.format(self.opts['master_job_cache']) if (self.opts.get('job_cache_store_endtime') and fstr in", ".*: - .* This configuration will enable all minions to", "external nodes without an id') return {} if not salt.utils.verify.valid_id(self.opts,", "to delete a specific function from its own mine '''", "load: for event in load['events']: self.event.fire_event(event, event['tag']) # old dup", "'id': load['id'], } if 'tmo' in load: try: pub_load['timeout'] =", "'Exception occurred while authenticating: {0}'.format(exc) ) return '' good =", "os.path.join( opts['cachedir'], 'file_lists', '{0}fs'.format(backend) ) try: file_lists_caches = os.listdir(file_lists_dir) except", "against eauth provider: {0}').format( self.opts['external_auth']) return '' good = self.ckminions.auth_check(", "0400 so clients are required to run as root. '''", "return False return True def _file_recv(self, load): ''' Allows minions", "wheel {0}: {1}: {2}'.format( fun, exc.__class__.__name__, exc, ) data['success'] =", "True elif stat.S_IWGRP & fmode.st_mode: return False # check if", "publish commands. # # In short, check with <NAME> before", "ImportError: return pillargitfs parts = opts_dict['git'].strip().split() try: br = parts[0]", "if not fileserver.servers: log.error( 'No fileservers loaded, the master will", "if 'jid' in minion: ret['__jid__'] = minion['jid'] data['ret'] = data.pop('return')", "def _pillar(self, load): ''' Return the pillar data for the", "'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if not self.loadauth.time_auth(load): msg =", "from salt.pillar import git_pillar from salt.utils.event import tagify from salt.exceptions", "minion to execute ''' # Verify that the load is", "return {} ret = {} if not salt.utils.verify.valid_id(self.opts, load['id']): return", "occurred.' 
log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if token['eauth'] not in self.opts['external_auth']:", "''' def __init__(self, opts): self.opts = opts self.event = salt.utils.event.get_event(", "load['id'] ) ) return ret def _mine_get(self, load, skip_verify=False): '''", "occurred.' ) return '' elif load['user'] == 'root': if load.pop('key')", "except OSError: continue for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'): cache_file =", "signing file ''' if not signing_file or not os.path.exists(signing_file): return", "salt.wheel.Wheel(opts) def runner(self, load): ''' Send a master control function", "in load or 'fun' not in load: return False if", "since on subsequent runs, if the file # exists, it", "= os.path.join(cdir, 'data.p') tmpfh, tmpfname = tempfile.mkstemp(dir=cdir) os.close(tmpfh) with salt.utils.fopen(tmpfname,", "return {'ret': { 'jid': load['jid'], 'minions': minions }, 'pub': pub_load", "> 0: min_time = time.time() - (60 * int(expire_minutes)) for", "with salt.utils.fopen(token_path) as token_file: token_data = serializer.loads(token_file.read()) if 'expire' not", "The eauth system is not enabled, fail log.warning('Authentication failure of", "master_tops interfaces opts = {} grains = {} ret =", "load['tgt_type'].startswith('node'): if load['tgt'] in self.opts['nodegroups']: pub_load['tgt'] = self.opts['nodegroups'][load['tgt']] pub_load['expr_form_type'] =", "user_re in self.opts['client_acl_blacklist'].get('users', []): if re.match(user_re, load['user']): good = False", "os.path.dirname(cpath) if not os.path.isdir(cdir): try: os.makedirs(cdir) except os.error: pass if", "data = minion if 'jid' in minion: ret['__jid__'] = minion['jid']", "key # Create the event manager self.event = salt.utils.event.get_event( 'master',", "fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load) except KeyError: log.critical( 'The specified", "'peer_run' not in self.opts: return {} if not isinstance(self.opts['peer_run'], dict):", "pwnam[3] groups = salt.utils.get_gid_list(user, include_default=False) except KeyError: log.error( 'Failed to", "and ('tag' not in load or 'data' not in load):", "to be written to again. Windows enforces this. os.chmod(keyfile, 0o600)", "token_data or token_data.get('expire', 0) < time.time(): try: os.remove(token_path) except (IOError,", "if the specified keyid should automatically be signed. 
''' if", "ret = self.wheel_.call_func(fun, **load) data['return'] = ret data['success'] = True", "tag, 'data': data} except Exception as exc: log.error('Exception occurred while", "salt libs import salt.crypt import salt.utils import salt.client import salt.payload", ".format(opts_dict['git']) ) else: pillargitfs.append( git_pillar.GitPillar( br, loc, opts ) )", "make a recursive publish don't run if re.match('publish.*', load['fun']): return", "master file cache ''' if any(key not in load for", "if 'mine_get' in self.opts: # If master side acl defined.", "self.opts['file_recv_max_size'] if 'loc' in load and load['loc'] < 0: log.error('Invalid", "the master job cache has a clean_old_jobs, call it fstr", "an event from the minion and fire it on the", "self.event.fire_event(new_job_load, 'new_job') # old dup event self.event.fire_event(new_job_load, tagify([load['jid'], 'new'], 'job'))", "OSError as exc: log.critical( 'Unable to clear env cache file", "fs_ = salt.fileserver.Fileserver(self.opts) self._serve_file = fs_.serve_file self._file_hash = fs_.file_hash self._file_list", "# Make a client self.local = salt.client.get_local_client(mopts=self.opts) # Make an", "minions list minions = self.ckminions.check_minions( load['tgt'], load.get('tgt_type', 'glob') ) #", "if load.get('form', '') == 'full': data = minion if 'jid'", "{0}'.format( exc) log.error(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if not token: msg", "return '' try: name = self.loadauth.load_name(extra) if not ((name in", "self.opts['external_auth'][extra['eauth']][name] if name in self.opts['external_auth'][extra['eauth']] else self.opts['external_auth'][extra['eauth']]['*'], load['fun'], load['tgt'], load.get('tgt_type',", "we order masters (via a syndic), don't short circuit if", "{} # Set up the publication payload pub_load = {", "pillar=load.get('pillar_override', {})) pillar_dirs = {} data = pillar.compile_pillar(pillar_dirs=pillar_dirs) if self.opts.get('minion_data_cache',", "# Retrieve the minions list minions = self.ckminions.check_minions( load['tgt'], load.get('tgt_type',", "fp_: fdata = self.serial.load(fp_).get(load['fun']) if fdata: ret[minion] = fdata except", "= os.path.join( opts['cachedir'], 'file_lists', '{0}fs'.format(backend) ) try: file_lists_caches = os.listdir(file_lists_dir)", "filesystem with permissions 0400 so clients are required to run", "= self.opts['jinja_trim_blocks'] return mopts def _ext_nodes(self, load, skip_verify=False): ''' Return", "in os.walk(opts['token_dir']): for token in filenames: token_path = os.path.join(dirpath, token)", "return True return False class RemoteFuncs(object): ''' Funcitons made available", "return True def _pillar(self, load): ''' Return the pillar data", "not in self.opts: return {} if not isinstance(self.opts['peer_run'], dict): return", "good = self.ckminions.wheel_check( self.opts['external_auth'][load['eauth']][name] if name in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][token['eauth']]['*'],", "log.warning( 'Authentication failure of type \"user\" occurred.' 
) return ''", "to do another # way that won't have a negative", "delete a specific function from its own mine ''' if", "'minions_autosign') # cleanup expired files expire_minutes = self.opts.get('autosign_expire_minutes', 10) if", "A token was passed, check it try: token = self.loadauth.get_tok(extra['token'])", "= os.path.join(cdir, 'mine.p') if os.path.isfile(datap): try: with salt.utils.fopen(datap, 'rb') as", "for use only from the local system ''' # The", "key for local communication'.format( user ) ) if HAS_PWD: if", "(self.opts.get('job_cache_store_endtime') and fstr in self.mminion.returners): self.mminion.returners[fstr](load['jid'], endtime) fstr = '{0}.returner'.format(self.opts['master_job_cache'])", "salt.utils import salt.client import salt.payload import salt.pillar import salt.state import", "continue if os.path.getmtime(auth_file_path) - time.time() > opts['keep_jobs']: os.remove(auth_file_path) except (IOError,", "data else: ret[minion['id']] = minion['return'] if 'jid' in minion: ret['__jid__']", "to listen to it! log.warn( 'Minion id {0} is not", "function back to the runner system ''' if 'token' in", "import tagify from salt.exceptions import SaltMasterError # Import 3rd-party libs", "extra: pub_load['id'] = extra['id'] if 'tgt_type' in load: pub_load['tgt_type'] =", "= self.ckminions.wheel_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun'])", "_auth def __init__(self, opts, key): self.opts = opts self.serial =", "if not self.loadauth.time_auth(extra): log.warning( 'Authentication failure of type \"eauth\" occurred.'", "configuration will enable all minions to execute all commands. peer:", "external auth calls if extra.get('token', False): # A token was", "= self.ckminions.runner_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun'])", "expire_minutes = self.opts.get('autosign_expire_minutes', 10) if expire_minutes > 0: min_time =", "in self.opts['external_auth']: log.warning('Authentication failure of type \"token\" occurred. \\ Authentication", "and rejection ''' def __init__(self, opts): self.opts = opts def", "the runner's function data ''' if 'peer_run' not in self.opts:", "'root'): if load.pop('key') != self.key[self.opts.get('user', 'root')]: log.warning( 'Authentication failure of", "load['grains'], load['id'], load.get('saltenv', load.get('env')), load.get('ext'), self.mminion.functions, pillar=load.get('pillar_override', {})) pillar_dirs =", "Minion to access returner data self.mminion = salt.minion.MasterMinion( self.opts, states=False,", "minion, this method executes minion restrictions so that the minion", "load['loc']: fp_.seek(load['loc']) fp_.write(load['data']) return True def _pillar(self, load): ''' Return", "module contains all of the routines needed to set up", "can sudo, allow them to act as root if load.get('key',", "self.mminion.functions, pillar=load.get('pillar_override', {})) pillar_dirs = {} data = pillar.compile_pillar(pillar_dirs=pillar_dirs) if", "self.key[self.opts.get('user', 'root')]: log.warning( 'Authentication failure of type \"user\" occurred.' 
)", "KeyError: log.critical( 'The specified returner used for the master job", "used when the master starts ''' pillargitfs = [] for", "self.local.get_cache_returns(load['jid']) def minion_pub(self, load): ''' Publish a command initiated from", "ret['__jid__'] = minion['jid'] data['ret'] = data.pop('return') ret[minion['id']] = data else:", "failure of type \"token\" occurred. \\ Token could not be", "\"user\" ' 'occurred.' ) return '' else: log.warning( 'Authentication failure", "to salt functions, so the minions can only publish allowed", "def check_autosign(self, keyid): ''' Checks if the specified keyid should", "load['jid'], 'ret': load['ret'], } if 'id' in extra: pub_load['id'] =", "''' Clean out the old fileserver backends ''' # Clear", "''' Allow the minion to delete a specific function from", "tokenized user = \"{0}\"'.format(load['user'])) elif 'eauth' in extra: if extra['eauth']", "''' if salt.utils.is_windows(): return True # After we've ascertained we're", "expire_minutes > 0: min_time = time.time() - (60 * int(expire_minutes))", "False break # check if the cmd is blacklisted for", "all configured master_tops interfaces opts = {} grains = {}", "# This is the list of funcs/modules! if isinstance(self.opts['peer'][match], list):", "self.opts, load['grains'], load['id'], load.get('saltenv', load.get('env')), load.get('ext'), self.mminion.functions, pillar=load.get('pillar_override', {})) pillar_dirs", "not in load: msg = ('Authentication failure of type \"eauth\"", "not verify against eauth provider: {0}').format( self.opts['external_auth']) return '' good", "this class includes the raw routines post validation that make", "os.path.join(auth_cache, load['jid']) with salt.utils.fopen(jid_fn, 'r') as fp_: if not load['id']", "blacklist/whitelist good = True # Check if the user is", "except Exception: log.critical( 'The specified returner threw a stack trace:\\n',", "uid = pwnam[2] gid = pwnam[3] groups = salt.utils.get_gid_list(user, include_default=False)", "rejected. 
''' return self.check_signing_file( keyid, self.opts.get('autoreject_file', None) ) def check_autosign(self,", "not in load: log.error('Received call for external nodes without an", "# The master is not being run as root and", "False self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data}", "any(key not in load for key in ('fun', 'arg', 'tgt',", "pub_load['expr_form'] = load['tgt_type'] else: return {} else: pub_load['expr_form'] = load['tgt_type']", "the filesystem with permissions 0400 so clients are required to", "else: pub_load['expr_form'] = load['tgt_type'] ret = {} ret['jid'] = self.local.cmd_async(**pub_load)", "= os.path.getmtime(stub_file) if mtime < min_time: log.warn('Autosign keyid expired {0}'.format(stub_file))", "to delete all of its own mine contents ''' if", "False)) return True class LocalFuncs(object): ''' Set up methods for", "salt.auth.LoadAuth(opts) # Stand up the master Minion to access returner", "mk_token(self, load): ''' Create and return an authentication token, the", "master elif 'user' in load: if load['user'].startswith('sudo_'): # If someone", "# cleanup expired files expire_minutes = self.opts.get('autosign_expire_minutes', 10) if expire_minutes", "pub_load['timeout'] = int(load['timeout']) except ValueError: msg = 'Failed to parse", "results from an external node classifier if one is specified", "in filenames: token_path = os.path.join(dirpath, token) with salt.utils.fopen(token_path) as token_file:", "def _master_opts(self, load): ''' Return the master options to the", "that make up the minion access to the master '''", "checker object self.ckminions = salt.utils.minions.CkMinions(opts) # Make an Auth object", "load['path'] if ':' in normpath: # make sure double backslashes", "(name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']]): msg = ('Authentication", "parse timeout value: {0}'.format( load['timeout']) log.warn(msg) return {} if 'tgt_type'", "= self.ckminions.auth_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun'],", "import salt.utils.jid from salt.pillar import git_pillar from salt.utils.event import tagify", "'timeout' in load: try: pub_load['timeout'] = int(load['timeout']) except ValueError: msg", "as fp_: fp_.write( self.serial.dumps( {'grains': load['grains'], 'pillar': data}) ) #", "the old jobs from the job cache ''' # TODO:", "elif self.opts.get('permissive_pki_access', False) \\ and fmode.st_gid in groups: return True", "look like this: peer: .*: - .* This configuration will", "and the needed authentication creds. ''' if 'eauth' not in", "funs_to_check: if re.match(module_re, fun): good = False break if good", "Accept find_job so the CLI will function cleanly if load['fun']", "time import stat import tempfile # Import salt libs import", "libs import salt.crypt import salt.utils import salt.client import salt.payload import", "Check the permissions for this minion perms = [] for", "= salt.utils.jid.gen_jid() fun = load.pop('fun') tag = tagify(jid, prefix='wheel') data", "= fs_.file_list self._file_list_emptydirs = fs_.file_list_emptydirs self._dir_list = fs_.dir_list self._symlink_list =", "'Authentication failure of type \"user\" occurred.' 
) return '' good", "return False keyapi = salt.key.Key(self.opts) keyapi.delete_key(load['id'], preserve_minions=load.get('preserve_minion_cache', False)) return True", "external pillar data: {0}' .format(opts_dict['git']) ) else: pillargitfs.append( git_pillar.GitPillar( br,", "'new_job') # old dup event self.event.fire_event(new_job_load, tagify([load['jid'], 'new'], 'job')) #", "} if 'tgt_type' in load: if load['tgt_type'].startswith('node'): if load['tgt'] in", "data self.tops = salt.loader.tops(self.opts) # Make a client self.local =", "def minion_publish(self, load): ''' Publish a command initiated from a", "minion['return'] if 'jid' in minion: ret['__jid__'] = minion['jid'] for key,", "absolute_import # Import python libs import fnmatch import logging import", "# The eauth system is not enabled, fail log.warning('Authentication failure", "os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, load['jid']) with salt.utils.fopen(jid_fn, 'r') as", "def minion_runner(self, load): ''' Execute a runner from a minion,", "in load for key in ('fun', 'arg', 'tgt', 'ret', 'id')):", "in load['arg']: arg_.append(arg.split()) load['arg'] = arg_ good = self.ckminions.auth_check( perms,", "try: user = pwd.getpwnam(user).pw_name except KeyError: log.error('ACL user {0} is", "and the workers needed by the master. ''' from __future__", "((token['name'] in self.opts['external_auth'][token['eauth']]) | ('*' in self.opts['external_auth'][token['eauth']])): log.warning('Authentication failure of", "a stack trace:\\n', exc_info=True ) # always write out to", "call for external nodes without an id') return {} if", "minions' mine ''' if not skip_verify: if any(key not in", "enable all minions to execute all commands. peer: foo.example.com: -", "_syndic_return(self, load): ''' Receive a syndic minion return and format", "object encapsulates the functions that can be executed in #", "load: return False keyapi = salt.key.Key(self.opts) keyapi.delete_key(load['id'], preserve_minions=load.get('preserve_minion_cache', False)) return", "= self.loadauth.get_tok(load['token']) except Exception as exc: msg = 'Exception occurred", "= fs_.serve_file self._file_hash = fs_.file_hash self._file_list = fs_.file_list self._file_list_emptydirs =", "} if 'tmo' in load: try: pub_load['timeout'] = int(load['tmo']) except", "0') return False if len(load['data']) + load.get('loc', 0) > file_recv_max_size:", "system ''' if 'token' in load: try: token = self.loadauth.get_tok(load['token'])", "if os.path.isfile(datap): try: os.remove(datap) except OSError: return False return True", "'data': data} except Exception as exc: log.error( 'Exception occurred in", "backends available') fileserver.update() except Exception as exc: log.error( 'Exception {0}", "OSError: continue for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'): cache_file = os.path.join(file_lists_dir,", "__init__(self, opts): self.opts = opts def check_permissions(self, filename): ''' Check", "in load or 'data' not in load: return False if", "= '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[prep_fstr](nocache=load.get('nocache', False)) # save the load,", "{} perms = set() for match in self.opts['peer_run']: if re.match(match,", "load['ret'], 'id': load['id'], } if 'tmo' in load: try: pub_load['timeout']", ") return '' except Exception as exc: log.error( 'Exception occurred", "user {0} is not available'.format(user)) continue keyfile = os.path.join( opts['cachedir'],", "root, dirs, 
filenames in os.walk(autosign_dir): for f in filenames: stub_file", "minions to send files to the master, files are sent", "in load for key in ('return', 'jid', 'id')): return False", "load: try: pub_load['timeout'] = int(load['tmo']) except ValueError: msg = 'Failed", "a master control function back to the runner system '''", "retrieved.') return '' if token['eauth'] not in self.opts['external_auth']: log.warning('Authentication failure", "= { 'fun': load['fun'], 'arg': load['arg'], 'expr_form': load.get('tgt_type', 'glob'), 'tgt':", "return True class LocalFuncs(object): ''' Set up methods for use", "'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) good = self.ckminions.wheel_check(", "in self.mminion.returners): self.mminion.returners[fstr](load['jid'], endtime) fstr = '{0}.returner'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load) def _syndic_return(self,", "check_signing_file(self, keyid, signing_file): ''' Check a keyid for membership in", "extra.get('token', False): # A token was passed, check it try:", "load.get('key', 'invalid') == self.key.get('root'): load.pop('key') elif load.pop('key') != self.key[self.opts.get('user', 'root')]:", "keyid for membership in a signing file ''' if not", "salt.utils.event import tagify from salt.exceptions import SaltMasterError # Import 3rd-party", "{0}. The user is not ' 'available.\\n'.format( user ) )", "if fdata: ret[minion] = fdata except Exception: continue return ret", "except Exception as exc: log.error( 'Exception {0} occurred in file", "= load.get('expr_form', 'glob') if match_type.lower() == 'pillar': match_type = 'pillar_exact'", "occurred.' log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) good = self.ckminions.wheel_check( self.opts['external_auth'][token['eauth']][token['name']] if", "needed to set up a master server, this involves preparing", "ret def _mine_get(self, load, skip_verify=False): ''' Gathers the data from", "listen=False) # Make a client self.local = salt.client.get_local_client(mopts=self.opts) # Make", "env cache file {0}: {1}' .format(env_cache, exc) ) file_lists_dir =", "if the requesting minion also initialted the execution. 
''' if", "return False if os.path.isabs(load['path']) or '../' in load['path']: # Can", "good = False for perm in perms: if re.match(perm, load['fun']):", "log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) good = self.ckminions.runner_check( self.opts['external_auth'][load['eauth']][name] if name", "good = self.ckminions.auth_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'],", "system ''' # The ClearFuncs object encapsulates the functions that", ") # Altering the contents of the publish load is", "want to do another # way that won't have a", "ext pillar caches, used when the master starts ''' pillargitfs", "False return True def _master_opts(self, load): ''' Return the master", "'jid' in minion: ret['__jid__'] = minion['jid'] for key, val in", "{0}'.format( load['timeout']) log.warn(msg) return {} if 'tgt_type' in load: if", "'rb') as fp_: mine_data = self.serial.load(fp_) if isinstance(mine_data, dict): if", "list): perms.update(self.opts['mine_get'][match]) if not any(re.match(perm, load['fun']) for perm in perms):", "in load for key in ('jid', 'id')): return {} else:", "= os.umask(191) with salt.utils.fopen(keyfile, 'w+') as fp_: fp_.write(key) os.umask(cumask) #", "if not os.path.exists(stub_file): return False os.remove(stub_file) return True def check_autoreject(self,", "{0}: {1}'.format(fun, exc)) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) if 'eauth' not", "blacklisted for module_re in self.opts['client_acl_blacklist'].get('modules', []): # if this is", "the external job cache self.mminion = salt.minion.MasterMinion( self.opts, states=False, rend=False)", "in self.opts['external_auth'][extra['eauth']])): log.warning( 'Authentication failure of type \"eauth\" occurred.' )", "fp_: fp_.write(self.serial.dumps(mine_data)) except OSError: return False return True def _mine_flush(self,", "it if any(key not in load for key in ('return',", "not token: msg = 'Authentication failure of type \"token\" occurred.'", "even think about # touching this stuff, we can probably", "module ''' if not self.__verify_minion_publish(load): return {} # Set up", "log.warning('Authentication failure of type \"token\" occurred. \\ Token could not", "match in self.opts['mine_get']: if re.match(match, load['id']): if isinstance(self.opts['mine_get'][match], list): perms.update(self.opts['mine_get'][match])", "load['fun'].split(',') arg_ = [] for arg in load['arg']: arg_.append(arg.split()) load['arg']", "will only allow the minion foo.example.com to execute commands from", "through eauth if 'token' in load: try: token = self.loadauth.get_tok(load['token'])", "up the minion access to the master ''' def __init__(self,", "Windows paths normpath = load['path'] if ':' in normpath: #", "of type \"user\" occurred.' 
) return '' elif load['user'] ==", "'fun': load['fun'], 'arg': load['arg'], 'expr_form': load.get('tgt_type', 'glob'), 'tgt': load['tgt'], 'ret':", "to the minions, it can only be used by the", "log.error( 'Exceeding file_recv_max_size limit: {0}'.format( file_recv_max_size ) ) return False", "'w+') as fp_: fp_.write(key) os.umask(cumask) # 600 octal: Read and", "load['jid']) with salt.utils.fopen(jid_fn, 'r') as fp_: if not load['id'] ==", "{0}'.format(exc) ) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) def mk_token(self, load): '''", "True def _mine_flush(self, load, skip_verify=False): ''' Allow the minion to", "ImportError: # pwd is not available on windows HAS_PWD =", "= False for perm in perms: if re.match(perm, load['fun']): good", "foo.example.com to execute commands from the test module ''' if", "is serious!! Changes here # break compatibility with minion/master versions", "is valid if 'peer' not in self.opts: return False if", "'id')): return False if load['jid'] == 'req': # The minion", "= salt.client.get_local_client(mopts=self.opts) # Make an minion checker object self.ckminions =", "in self.opts['external_auth'][load['eauth']])): log.warning('Authentication failure of type \"eauth\" occurred.') return ''", "if not self.check_permissions(signing_file): message = 'Wrong permissions for {0}, ignoring", "'mine.p') if os.path.isfile(datap): try: with salt.utils.fopen(datap, 'rb') as fp_: mine_data", "== 'pillar': match_type = 'pillar_exact' if match_type.lower() == 'compound': match_type", "tag) return True def _return(self, load): ''' Handle the return", "Exception: log.critical( 'The specified returner threw a stack trace:\\n', exc_info=True", "ret match_type = load.get('expr_form', 'glob') if match_type.lower() == 'pillar': match_type", "key in ('id', 'tgt', 'fun')): return {} if 'mine_get' in", "minion to delete a specific function from its own mine", "not ((name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']])): msg =", "of type \"eauth\" occurred.') return '' try: name = self.loadauth.load_name(load)", "# User is authorised, check key and check perms if", "!= self.key[salt.utils.get_user()]: log.warning( 'Authentication failure of type \"other\" occurred.' 
)", "minion, 'mine.p') try: with salt.utils.fopen(mine, 'rb') as fp_: fdata =", "eauth system is not enabled, fail log.warning('Authentication failure of type", "file_lists_dir = os.path.join( opts['cachedir'], 'file_lists', '{0}fs'.format(backend) ) try: file_lists_caches =", "= arg_ good = self.ckminions.auth_check( perms, load['fun'], load['tgt'], load.get('tgt_type', 'glob'),", "system is not enabled, fail msg = ('Authentication failure of", "not enabled, fail log.warning('Authentication failure of type \"eauth\" occurred.') return", "self.loadauth.time_auth(load): log.warning('Authentication failure of type \"eauth\" occurred.') return '' return", "def _syndic_return(self, load): ''' Receive a syndic minion return and", "'introspecting {0}: {1}'.format(fun, exc)) data['return'] = 'Exception occurred in wheel", "salt.client import salt.payload import salt.pillar import salt.state import salt.runner import", "log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if load['eauth'] not in self.opts['external_auth']: #", "function if isinstance(load['fun'], str): funs_to_check = [load['fun']] # if this", "OSError: return False return True def _mine_flush(self, load, skip_verify=False): '''", "isinstance(self.opts['mine_get'], dict): return {} perms = set() for match in", "in fp_: line = line.strip() if line.startswith('#'): continue else: if", "load.get('kwargs', {}) # check blacklist/whitelist good = True # Check", "elif load['user'] == 'root': if load.pop('key') != self.key.get(self.opts.get('user', 'root')): log.warning(", "pub_load['tgt'] = self.opts['nodegroups'][load['tgt']] pub_load['expr_form_type'] = 'compound' pub_load['expr_form'] = load['tgt_type'] else:", "we can probably do what you want to do another", "from a specific jid, only allowed if the requesting minion", "dict for loading external top data self.tops = salt.loader.tops(self.opts) #", "Import python libs import fnmatch import logging import os import", "cache ''' if any(key not in load for key in", "if 'eauth' not in load: msg = ('Authentication failure of", "'Unable to file_lists cache file {0}: {1}' .format(cache_file, exc) )", "fun): good = False break if good is False: log.error(", "users: try: user = pwd.getpwnam(user).pw_name except KeyError: log.error('ACL user {0}", "return '' if not ((token['name'] in self.opts['external_auth'][token['eauth']]) | ('*' in", "with a token or False if the token is invalid", "self.opts['state_top'] mopts['nodegroups'] = self.opts['nodegroups'] mopts['state_auto_order'] = self.opts['state_auto_order'] mopts['state_events'] = self.opts['state_events']", "key return keys def fileserver_update(fileserver): ''' Update the fileserver backends,", "salt.loader.tops(self.opts) # Make a client self.local = salt.client.get_local_client(mopts=self.opts) # Create", "= tempfile.mkstemp(dir=cdir) os.close(tmpfh) with salt.utils.fopen(tmpfname, 'w+b') as fp_: fp_.write( self.serial.dumps(", "so the minions can only publish allowed salt functions The", "item in six.iteritems(load['return']): ret = {'jid': load['jid'], 'id': key, 'return':", "check if writable by group or other if not (stat.S_IWGRP", "foo.example.com: - test.* This configuration will only allow the minion", "= 'Failed to parse timeout value: {0}'.format( load['timeout']) log.warn(msg) return", "load.pop('key') != self.key.get(self.opts.get('user', 'root')): log.warning( 'Authentication failure of type \"user\"", "skip_verify: if any(key not in load for key in ('id',", "initiated from a minion, this 
method executes minion restrictions so", "good = self.ckminions.auth_check( perms, load['fun'], load['tgt'], load.get('tgt_type', 'glob'), publish_validate=True) if", "The master is not being run as root and can", "on master elif 'user' in load: if load['user'].startswith('sudo_'): # If", "Exception as exc: log.error( 'Exception {0} occurred in file server", "workers needed by the master. ''' from __future__ import absolute_import", "''' # Generate EndTime endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid()) # If the", "= new with salt.utils.fopen(datap, 'w+b') as fp_: fp_.write(self.serial.dumps(load['data'])) return True", "load['user']): good = False break # check if the cmd", "fs_.file_hash self._file_list = fs_.file_list self._file_list_emptydirs = fs_.file_list_emptydirs self._dir_list = fs_.dir_list", "import SaltMasterError # Import 3rd-party libs import salt.ext.six as six", "self.ckminions.auth_check( perms, load['fun'], load['tgt'], load.get('tgt_type', 'glob'), publish_validate=True) if not good:", "'' # Retrieve the minions list minions = self.ckminions.check_minions( load['tgt'],", "= [] for arg in load['arg']: arg_.append(arg.split()) load['arg'] = arg_", "states=False, rend=False, ) # If the master job cache has", "os.path.isabs(load['path']): return False if os.path.isabs(load['path']) or '../' in load['path']: #", "'rb') as fp_: new = self.serial.load(fp_) if isinstance(new, dict): new.update(load['data'])", "timeout value: {0}'.format( load['tmo']) log.warn(msg) return {} if 'timeout' in", "of type \"user\" occurred.' ) return '' good = self.ckminions.auth_check(", "self.serial.load(fp_) if isinstance(new, dict): new.update(load['data']) load['data'] = new with salt.utils.fopen(datap,", "save the load, since we don't have it saveload_fstr =", "in six.iteritems(self.local.get_cache_returns(ret['__jid__'])): if key not in ret: ret[key] = val", "'load': { 'jid': None, 'minions': minions } } # Retrieve", "don't have it saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[saveload_fstr](load['jid'], load) log.info('Got return", "return True if self.check_signing_file(keyid, self.opts.get('autosign_file', None)): return True if self.check_autosign_dir(keyid):", "'data': data} if 'eauth' not in load: msg = ('Authentication", "log.critical( 'Unable to clear env cache file {0}: {1}' .format(env_cache,", "pass def clean_pub_auth(opts): try: auth_cache = os.path.join(opts['cachedir'], 'publish_auth') if not", "object self.loadauth = salt.auth.LoadAuth(opts) # Stand up the master Minion", "'minions': minions } } # Retrieve the jid if not", "msg = 'Authentication failure of type \"token\" occurred.' 
log.warning(msg) return", "fileserver_update(fileserver): ''' Update the fileserver backends, requires that a built", "os.path.isfile(datap): with salt.utils.fopen(datap, 'rb') as fp_: new = self.serial.load(fp_) if", "minions to be matched to salt functions, so the minions", "{} if 'timeout' in load: try: pub_load['timeout'] = int(load['timeout']) except", "= os.path.dirname(cpath) if not os.path.isdir(cdir): try: os.makedirs(cdir) except os.error: pass", "for ' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) try:", "''' Check if the specified filename has correct permissions '''", "name = self.loadauth.load_name(load) if not ((name in self.opts['external_auth'][load['eauth']]) | ('*'", "good = self.ckminions.auth_check( self.opts['client_acl'][load['user']], load['fun'], load['tgt'], load.get('tgt_type', 'glob')) if not", "log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if not self.loadauth.time_auth(load): msg = ('Authentication", "fmode.st_gid in groups: return True else: if stat.S_IWOTH & fmode.st_mode:", "Set the local file objects from the file server interface", "with salt.utils.fopen(mine, 'rb') as fp_: fdata = self.serial.load(fp_).get(load['fun']) if fdata:", "list): perms.update(self.opts['peer_run'][match]) good = False for perm in perms: if", "if 'tgt_type' in load: pub_load['tgt_type'] = load['tgt_type'] if 'to' in", "in self.opts['external_auth'][token['eauth']])): log.warning('Authentication failure of type \"token\" occurred. \\ Token", "salt.utils.get_user(): if load.pop('key') != self.key.get(load['user']): log.warning( 'Authentication failure of type", "envs: if saltenv not in file_roots: file_roots[saltenv] = [] mopts['file_roots']", "methods to run auto key acceptance and rejection ''' def", "arg in load['arg']: arg_.append(arg.split()) load['arg'] = arg_ good = self.ckminions.auth_check(", "data from a specific jid, only allowed if the requesting", "message=msg)) try: fun = load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.async(", "in ' 'error.\\n'.format( user=load['user'], function=load['fun'] ) ) return '' #", "{0}, ignoring content' log.warn(message.format(signing_file)) return False with salt.utils.fopen(signing_file, 'r') as", "new with salt.utils.fopen(datap, 'w+b') as fp_: fp_.write(self.serial.dumps(load['data'])) return True def", "if 'token' not in load: return False return self.loadauth.get_tok(load['token']) def", "= load['user'] else: log.info( 'Published command {fun} with jid {jid}'.format(", "publish allowed salt functions The config will look like this:", "token['eauth'] not in self.opts['external_auth']: log.warning('Authentication failure of type \"token\" occurred.", "not be able to ' 'serve files to minions' )", "os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, load['jid']) with salt.utils.fopen(jid_fn, 'r') as fp_:", "'' good = self.ckminions.auth_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else", "of the # publish commands. 
# # In short, check", "os.path.join( opts['cachedir'], '.{0}_key'.format(user) ) if os.path.exists(keyfile): log.debug('Removing stale keyfile: {0}'.format(keyfile))", "publish (The publish from the LocalClient) # _auth def __init__(self,", "if fstr in mminion.returners: mminion.returners[fstr]() def access_keys(opts): ''' A key", "not signing_file or not os.path.exists(signing_file): return False if not self.check_permissions(signing_file):", "# Make a client self.local = salt.client.get_local_client(mopts=self.opts) # Create the", "for key in ('return', 'jid', 'id')): return None # if", "''' Check a keyid for membership in a autosign directory.", "opts.get('ext_pillar', [])]: if 'git' in opts_dict: try: import git except", "in load['path']: # Can overwrite master files!! return False if", "len(load['data']) + load.get('loc', 0) > file_recv_max_size: log.error( 'Exceeding file_recv_max_size limit:", "if not isinstance(self.opts['peer'], dict): return False if any(key not in", "of type \"eauth\" occurred.' ) return '' if not self.loadauth.time_auth(extra):", "in the runner system: {0}'.format(exc) ) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc)))", "} # Announce the job on the event bus self.event.fire_event(new_job_load,", "load['id']], 'job')) self.event.fire_ret_load(load) if not self.opts['job_cache'] or self.opts.get('ext_job_cache'): return fstr", "as exc: log.error('Exception occurred while ' 'introspecting {0}: {1}'.format(fun, exc))", "= salt.runner.RunnerClient(self.opts) return runner_client.async(fun, load.get('kwarg', {}), load.get('username', 'UNKNOWN')) except Exception", "publications to the minions, it can only be used by", "for user_re in self.opts['client_acl_blacklist'].get('users', []): if re.match(user_re, load['user']): good =", "os.path.isfile(datap): try: with salt.utils.fopen(datap, 'rb') as fp_: mine_data = self.serial.load(fp_)", "files!! return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False file_recv_max_size", "new = self.serial.load(fp_) if isinstance(new, dict): new.update(load['data']) load['data'] = new", "for event in load['events']: self.event.fire_event(event, event['tag']) # old dup event", "exc: log.error( 'Exception {0} occurred in file server update'.format(exc), exc_info_on_loglevel=logging.DEBUG", "of type \"eauth\" occurred.') return '' return self.loadauth.mk_token(load) except Exception", "file_roots if load.get('env_only'): return mopts mopts['renderer'] = self.opts['renderer'] mopts['failhard'] =", "self.loadauth.mk_token(load) except Exception as exc: log.error( 'Exception occurred while authenticating:", "manager self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False) #", "!= self.key.get(load['user']): log.warning( 'Authentication failure of type \"user\" occurred.' 
)", "mine_data = self.serial.load(fp_) if isinstance(mine_data, dict): if mine_data.pop(load['fun'], False): with", "'token' in load: try: token = self.loadauth.get_tok(load['token']) except Exception as", "return pillargitfs parts = opts_dict['git'].strip().split() try: br = parts[0] loc", "= {} grains = {} ret = {} if 'opts'", "load['load']) # Format individual return loads for key, item in", "(stat.S_IWGRP & fmode.st_mode or stat.S_IWOTH & fmode.st_mode): return True return", "in self.opts['client_acl_blacklist'].get('users', []): if re.match(user_re, load['user']): good = False break", "not isinstance(self.opts['peer'], dict): return False if any(key not in load", "in ('git', 'hg', 'svn'): if backend in opts['fileserver_backend']: env_cache =", "self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} if", "Changes here # break compatibility with minion/master versions and even", "return False if not self.check_permissions(signing_file): message = 'Wrong permissions for", "# Clear remote fileserver backend caches so they get recreated", "RemoteFuncs(object): ''' Funcitons made available to minions, this class includes", "in ('id', 'grains')): return False pillar = salt.pillar.Pillar( self.opts, load['grains'],", "'user {0}.').format(token['name']) log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) try: fun = load.pop('fun')", "the LocalClient. ''' extra = load.get('kwargs', {}) # check blacklist/whitelist", "who it says it is! # We don't want to", ") # If we order masters (via a syndic), don't", "the master, files are sent to the master file cache", "We don't want to listen to it! log.warn( 'Minion id", "(dirpath, dirnames, filenames) in os.walk(auth_cache): for auth_file in filenames: auth_file_path", "= data.pop('return') ret[minion['id']] = data else: ret[minion['id']] = minion['return'] if", "= self.opts['jinja_lstrip_blocks'] mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks'] return mopts def _ext_nodes(self, load,", "'new'], 'job')) # Save the invocation information if self.opts['ext_job_cache']: try:", "occurred when generating auth token: {0}'.format( exc) log.error(msg) return dict(error=dict(name='TokenAuthenticationError',", "in load: pub_load['tgt_type'] = load['tgt_type'] if 'to' in load: pub_load['to']", "import fnmatch import logging import os import re import time", "for token in filenames: token_path = os.path.join(dirpath, token) with salt.utils.fopen(token_path)", "''' if not skip_verify and any(key not in load for", "= salt.utils.minions.CkMinions(opts) # Create the tops dict for loading external", "load['data'] = new with salt.utils.fopen(datap, 'w+b') as fp_: fp_.write(self.serial.dumps(load['data'])) return", "'../' in load['path']: # Can overwrite master files!! 
return False", "salt.utils.fopen(cpath, mode) as fp_: if load['loc']: fp_.seek(load['loc']) fp_.write(load['data']) return True", "in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']])): msg = ('Authentication failure", "'pillar': data}) ) # On Windows, os.rename will fail if", "filenames: auth_file_path = os.path.join(dirpath, auth_file) if not os.path.isfile(auth_file_path): continue if", "dict(error=dict(name='TokenAuthenticationError', message=msg)) try: fun = load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return", "= True self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data':", "out the old fileserver backends ''' # Clear remote fileserver", "tokens from the master ''' serializer = salt.payload.Serial(opts) for (dirpath,", "salt.runner import salt.auth import salt.wheel import salt.minion import salt.search import", "mopts['state_aggregate'] = self.opts['state_aggregate'] mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks'] mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks'] return", "{} if 'mine_get' in self.opts: # If master side acl", "for key, val in six.iteritems(self.local.get_cache_returns(ret['__jid__'])): if key not in ret:", "does not have permissions to run {function}. Please ' 'contact", "# check if writable by group or other if not", "False if 'events' in load: for event in load['events']: self.event.fire_event(event,", "not in load: return False keyapi = salt.key.Key(self.opts) keyapi.delete_key(load['id'], preserve_minions=load.get('preserve_minion_cache',", "own key ''' if 'id' not in load: return False", "mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks'] return mopts def _ext_nodes(self, load, skip_verify=False): '''", "and fire it on the master event interface ''' if", "this is in ' 'error.\\n'.format( user=load['user'], function=load['fun'] ) ) return", "invocation information if self.opts['ext_job_cache']: try: fstr = '{0}.save_load'.format(self.opts['ext_job_cache']) self.mminion.returners[fstr](load['jid'], load)", "salt.utils.fopen(token_path) as token_file: token_data = serializer.loads(token_file.read()) if 'expire' not in", "'conf_file': self.opts['conf_file']} opts.update(self.opts) runner = salt.runner.Runner(opts) return runner.run() def pub_ret(self,", "\"wheel.{0}\".format(fun), 'jid': jid, 'tag': tag, 'user': token['name']} try: self.event.fire_event(data, tagify([jid,", "not ((token['name'] in self.opts['external_auth'][token['eauth']]) | ('*' in self.opts['external_auth'][token['eauth']])): log.warning('Authentication failure", "= salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False) # Make a", "= {'fun': \"wheel.{0}\".format(fun), 'jid': jid, 'tag': tag, 'user': token['name']} try:", "files to minions' ) raise SaltMasterError('No fileserver backends available') fileserver.update()", "('git', 'hg', 'svn'): if backend in opts['fileserver_backend']: env_cache = os.path.join(", "ret['out'] = load['out'] self._return(ret) def minion_runner(self, load): ''' Execute a", "to write to the file return False # check group", "overwrite master files!! 
return False if not salt.utils.verify.valid_id(self.opts, load['id']): return", "the mine data ''' if not skip_verify: if 'id' not", "\"wheel.{0}\".format(fun), 'jid': jid, 'tag': tag, 'user': load.get('username', 'UNKNOWN')} try: self.event.fire_event(data,", "False): cdir = os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): os.makedirs(cdir)", "env_cache = os.path.join( opts['cachedir'], '{0}fs'.format(backend), 'envs.p' ) if os.path.isfile(env_cache): log.debug('Clearing", "if 'out' in load: ret['out'] = load['out'] self._return(ret) def minion_runner(self,", "file cache ''' if any(key not in load for key", "not skip_verify: if 'id' not in load or 'data' not", "return True # After we've ascertained we're not on windows", "ret: ret[key] = val if load.get('form', '') != 'full': ret.pop('__jid__')", "occurred.' ) return '' load['user'] = name # Verify that", "load.pop('key') != self.key[self.opts.get('user', 'root')]: log.warning( 'Authentication failure of type \"user\"", "key = salt.crypt.Crypticle.generate_key_string() cumask = os.umask(191) with salt.utils.fopen(keyfile, 'w+') as", "Announce the job on the event bus self.event.fire_event(new_job_load, 'new_job') #", "opts, states=False, rend=False, ) # If the master job cache", "'Authentication failure of type \"user\" occurred.' ) return '' if", "self.opts['external_auth'][extra['eauth']])): log.warning( 'Authentication failure of type \"eauth\" occurred.' ) return", "except Exception as exc: log.error('Exception occurred while ' 'introspecting {0}:", "load.get('env_only'): return mopts mopts['renderer'] = self.opts['renderer'] mopts['failhard'] = self.opts['failhard'] mopts['state_top']", "in filenames: auth_file_path = os.path.join(dirpath, auth_file) if not os.path.isfile(auth_file_path): continue", "# Format individual return loads for key, item in six.iteritems(load['return']):", "file server interface ''' fs_ = salt.fileserver.Fileserver(self.opts) self._serve_file = fs_.serve_file", "load['user'] = token['name'] log.debug('Minion tokenized user = \"{0}\"'.format(load['user'])) elif 'eauth'", "from salt.exceptions import SaltMasterError # Import 3rd-party libs import salt.ext.six", "log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) good = self.ckminions.wheel_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name']", "self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun']) if not good: msg = ('Authentication", "tagify([load['jid'], 'new'], 'job')) # Save the invocation information if self.opts['ext_job_cache']:", "file exists. 
salt.utils.atomicfile.atomic_rename(tmpfname, datap) return data def _minion_event(self, load): '''", "== uid or fmode.st_gid != gid: return True elif self.opts.get('permissive_pki_access',", "minion ''' if any(key not in load for key in", "isinstance(self.opts['peer_run'], dict): return {} if any(key not in load for", "def check_signing_file(self, keyid, signing_file): ''' Check a keyid for membership", "log = logging.getLogger(__name__) # Things to do in lower layers:", "= salt.payload.Serial(opts) self.key = key # Create the event manager", "fp_: new = self.serial.load(fp_) if isinstance(new, dict): new.update(load['data']) load['data'] =", "!= 0: mode = 'ab' else: mode = 'wb' with", "int(load['timeout']) except ValueError: msg = 'Failed to parse timeout value:", "exc)) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) if 'eauth' not in load:", "endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid()) # If the return data is invalid,", "fail log.warning('Authentication failure of type \"eauth\" occurred.') return '' try:", "load['to'] if 'kwargs' in load: if 'ret_config' in load['kwargs']: pub_load['ret_config']", ") return '' load['user'] = token['name'] log.debug('Minion tokenized user =", "self.opts, states=False, rend=False) # Make a wheel object self.wheel_ =", "''' if not skip_verify and 'id' not in load: return", "' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if not", "self.opts.get('order_masters'): # Check for no minions if not minions: return", "save_load function!'.format( self.opts['master_job_cache'] ) ) except Exception: log.critical( 'The specified", "'id' not in load: return False if 'events' not in", "'The specified returner used for the master job cache '", "self.opts.get('permissive_pki_access', False) and stat.S_IWGRP & fmode.st_mode: return True elif stat.S_IWGRP", "not have a save_load function!'.format( self.opts['master_job_cache'] ) ) except Exception:", "opts = {} grains = {} ret = {} if", "as fp_: new = self.serial.load(fp_) if isinstance(new, dict): new.update(load['data']) load['data']", "have a save_load function!'.format( self.opts['ext_job_cache'] ) ) except Exception: log.critical(", "True if self.check_autosign_dir(keyid): return True return False class RemoteFuncs(object): '''", "also initialted the execution. ''' if not skip_verify and any(key", "not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, load['jid']) with salt.utils.fopen(jid_fn, 'r')", "self.opts['peer']: if re.match(match, load['id']): # This is the list of", "key in ('return', 'jid', 'id')): return None # if we", "data def _minion_event(self, load): ''' Receive an event from the", "minions. 
''' # Verify the load if any(key not in", "def _minion_event(self, load): ''' Receive an event from the minion", "of type \"token\" occurred for ' 'user {0}.').format(token['name']) log.warning(msg) return", "Clean out the old fileserver backends ''' # Clear remote", "'arg': load['arg'], 'id': load['id'], 'doc': False, 'conf_file': self.opts['conf_file']} opts.update(self.opts) runner", "for key in ('id', 'grains')): return False pillar = salt.pillar.Pillar(", "'ret', load['id']], 'job')) self.event.fire_ret_load(load) if not self.opts['job_cache'] or self.opts.get('ext_job_cache'): return", "= True if not good: # The minion is not", "salt.utils.minions.CkMinions(self.opts) minions = checker.check_minions( load['tgt'], match_type, greedy=False ) for minion", "occurred while ' 'introspecting {0}: {1}'.format(fun, exc)) return dict(error=dict(name=exc.__class__.__name__, args=exc.args,", "data['return'] = ret data['success'] = True self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))", "tag, 'data': data} if 'eauth' not in load: msg =", "type \"user\" occurred.' ) return '' elif load['user'] == self.opts.get('user',", "= logging.getLogger(__name__) # Things to do in lower layers: #", "key, 'return': item} if 'out' in load: ret['out'] = load['out']", "function!'.format( self.opts['ext_job_cache'] ) ) except Exception: log.critical( 'The specified returner", "ascertained we're not on windows try: user = self.opts['user'] pwnam", "Auth object self.loadauth = salt.auth.LoadAuth(opts) # Stand up the master", "__setup_fileserver(self): ''' Set the local file objects from the file", "back to the wheel system ''' # All wheel ops", "the master allows minions to be matched to salt functions,", "if not skip_verify: if 'id' not in load: log.error('Received call", "{} if 'opts' in load: opts = load['opts'] if 'grains'", "if load.get('env_only'): return mopts mopts['renderer'] = self.opts['renderer'] mopts['failhard'] = self.opts['failhard']", "an id') return {} if not salt.utils.verify.valid_id(self.opts, load['id']): return {}", "minions # are found if not self.opts.get('order_masters'): # Check for", "minions, } # Announce the job on the event bus", "elif load['user'] == salt.utils.get_user(): if load.pop('key') != self.key.get(load['user']): log.warning( 'Authentication", "in ('fun', 'arg', 'id')): return {} perms = set() for", "load['jid'], 'id': key, 'return': item} if 'out' in load: ret['out']", "'{0}.returner'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load) def _syndic_return(self, load): ''' Receive a syndic minion", "if re.match(match, load['id']): if isinstance(self.opts['mine_get'][match], list): perms.update(self.opts['mine_get'][match]) if not any(re.match(perm,", "on the event bus self.event.fire_event(new_job_load, 'new_job') # old dup event", "log.error( 'Exception occurred while authenticating: {0}'.format(exc) ) return '' good", "load['jid'] = self.mminion.returners[prep_fstr](nocache=load.get('nocache', False)) # save the load, since we", "''' serializer = salt.payload.Serial(opts) for (dirpath, dirnames, filenames) in os.walk(opts['token_dir']):", "salt.utils.expr_match(keyid, line): return True return False def check_autosign_dir(self, keyid): '''", "'tgt_type': load['tgt_type'], 'tgt': load['tgt'], 'user': load['user'], 'fun': load['fun'], 'arg': load['arg'],", "as fp_: fp_.write(key) os.umask(cumask) # 600 octal: Read and write", "dirnames, filenames) in os.walk(auth_cache): for auth_file in filenames: auth_file_path =", "clean_pub_auth(opts): try: 
auth_cache = os.path.join(opts['cachedir'], 'publish_auth') if not os.path.exists(auth_cache): return", "and write access to the owner only. # Write access", "load for key in ('jid', 'id')): return {} else: auth_cache", "skip_verify: if 'id' not in load: log.error('Received call for external", "type \"token\" occurred. \\ Token does not verify against eauth", "for perm in perms): return {} ret = {} if", "load for key in ('fun', 'arg', 'tgt', 'ret', 'id')): return", "self.opts['external_auth'][load['eauth']])): log.warning('Authentication failure of type \"eauth\" occurred.') return '' if", "to determine groups for user {0}. The user is not", "env cache'.format(backend)) try: os.remove(env_cache) except OSError as exc: log.critical( 'Unable", "os.stat(filename) if os.getuid() == 0: if fmode.st_uid == uid or", "encapsulates the functions that can be executed in # the", "eauth key and the needed authentication creds. ''' if 'eauth'", "bus self.event.fire_event(new_job_load, 'new_job') # old dup event self.event.fire_event(new_job_load, tagify([load['jid'], 'new'],", "'{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[prep_fstr](nocache=load.get('nocache', False)) # save the load, since", "'pillar': match_type = 'pillar_exact' if match_type.lower() == 'compound': match_type =", "'eauth' in extra: if extra['eauth'] not in self.opts['external_auth']: # The", "load, since we don't have it saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[saveload_fstr](load['jid'],", "not any(re.match(perm, load['fun']) for perm in perms): return {} ret", "= self.ckminions.check_minions( load['tgt'], load.get('tgt_type', 'glob') ) # If we order", "time? mminion = salt.minion.MasterMinion( opts, states=False, rend=False, ) # If", "ignore it if any(key not in load for key in", "Allow a minion to request revocation of its own key", "# Create the tops dict for loading external top data", "if not signing_file or not os.path.exists(signing_file): return False if not", "self.loadauth.get_tok(load['token']) def publish(self, load): ''' This method sends out publications", "list minions = self.ckminions.check_minions( load['tgt'], load.get('tgt_type', 'glob') ) # If", ") ) except Exception: log.critical( 'The specified returner threw a", "minions, this class includes the raw routines post validation that", "False if len(load['data']) + load.get('loc', 0) > file_recv_max_size: log.error( 'Exceeding", "failure of type \"token\" occurred.' 
log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if", "return '' # Retrieve the minions list minions = self.ckminions.check_minions(", "os.path.join(cdir, 'mine.p') if os.path.isfile(datap): try: with salt.utils.fopen(datap, 'rb') as fp_:", "return {'tag': tag, 'data': data} except Exception as exc: log.error(exc)", "int(load['tmo']) except ValueError: msg = 'Failed to parse timeout value:", "[load['fun']] # if this a compound function else: funs_to_check =", "os.path.join(dirpath, auth_file) if not os.path.isfile(auth_file_path): continue if os.path.getmtime(auth_file_path) - time.time()", "and fmode.st_gid in groups: return True else: if stat.S_IWOTH &", "file {0}: {1}' .format(cache_file, exc) ) def clean_expired_tokens(opts): ''' Clean", "users.append(user.pw_name) for user in acl_users: log.info( 'Preparing the {0} key", "'introspecting {0}: {1}'.format(fun, exc)) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) except Exception", "'ret': load['ret'], 'id': load['id'], } if 'tmo' in load: try:", "= self.opts['nodegroups'] mopts['state_auto_order'] = self.opts['state_auto_order'] mopts['state_events'] = self.opts['state_events'] mopts['state_aggregate'] =", "normpath: # make sure double backslashes are normalized normpath =", "if load.pop('key') != self.key[self.opts.get('user', 'root')]: log.warning( 'Authentication failure of type", "load: ret['out'] = load['out'] self._return(ret) def minion_runner(self, load): ''' Execute", "from the LocalClient) # _auth def __init__(self, opts, key): self.opts", "gid = pwnam[3] groups = salt.utils.get_gid_list(user, include_default=False) except KeyError: log.error(", "token or False if the token is invalid ''' if", "if the token is invalid ''' if 'token' not in", "generation, log it and move on log.error( 'Top function {0}", "fmode.st_mode or stat.S_IWOTH & fmode.st_mode): return True return False def", "the specified minions' mine ''' if not skip_verify: if any(key", "''' This method sends out publications to the minions, it", "self.opts['client_acl_blacklist'].get('modules', []): # if this is a regular command, its", "fmode = os.stat(filename) if os.getuid() == 0: if fmode.st_uid ==", "{'tag': tag, 'data': data} except Exception as exc: log.error(exc) log.error('Exception", "os.path.join(autosign_dir, keyid) if not os.path.exists(stub_file): return False os.remove(stub_file) return True", "= [] keys = {} acl_users = set(opts['client_acl'].keys()) if opts.get('user'):", "''' Execute a runner from a minion, return the runner's", "os import re import time import stat import tempfile #", "'Unable to clear env cache file {0}: {1}' .format(env_cache, exc)", "name in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun']) if not good: msg", "self.key = key # Create the event manager self.event =", "the routines needed to set up a master server, this", "return {} if 'timeout' in load: try: pub_load['timeout'] = int(load['timeout'])", "except IndexError: log.critical( 'Unable to extract external pillar data: {0}'", "[]): # if this is a regular command, its a", "in load and load['loc'] < 0: log.error('Invalid file pointer: load[loc]", "opts = load['opts'] if 'grains' in load['opts']: grains = load['opts']['grains']", "accept valid minion ids def init_git_pillar(opts): ''' Clear out the", "in load for key in ('return', 'jid', 'id')): return None", "auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not 
os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn", "like this: peer: .*: - .* This configuration will enable", ") return '' def get_token(self, load): ''' Return the name", "_ext_nodes(self, load, skip_verify=False): ''' Return the results from an external", "if saltenv not in file_roots: file_roots[saltenv] = [] mopts['file_roots'] =", "a specific jid, only allowed if the requesting minion also", "'arg': load['arg'], 'minions': minions, } # Announce the job on", "jid {jid}'.format( **load ) ) log.debug('Published command details {0}'.format(pub_load)) return", "\"user\" occurred.' ) return '' good = self.ckminions.auth_check( self.opts['client_acl'][load['user']], load['fun'],", "self.event.fire_event(load, load['jid']) # old dup event self.event.fire_event(load, tagify([load['jid'], 'ret', load['id']],", "'wb' with salt.utils.fopen(cpath, mode) as fp_: if load['loc']: fp_.seek(load['loc']) fp_.write(load['data'])", "we've ascertained we're not on windows try: user = self.opts['user']", "''' Return the pillar data for the minion ''' if", "if not load['jid']: fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[fstr](nocache=extra.get('nocache', False))", "log.warning('Authentication failure of type \"token\" occurred. \\ Token does not", "0o600) if HAS_PWD: try: os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1) except OSError: #", "in self.opts['peer']: if re.match(match, load['id']): # This is the list", "mine contents ''' if not skip_verify and 'id' not in", "without an id') return {} if not salt.utils.verify.valid_id(self.opts, load['id']): return", "salt.wheel import salt.minion import salt.search import salt.key import salt.fileserver import", "if isinstance(self.opts['mine_get'][match], list): perms.update(self.opts['mine_get'][match]) if not any(re.match(perm, load['fun']) for perm", "' 'serve files to minions' ) raise SaltMasterError('No fileserver backends", "'Exception occurred in the runner system: {0}'.format(exc) ) return dict(error=dict(name=exc.__class__.__name__,", "the fileserver backends, requires that a built fileserver object be", "occurred.' ) return '' else: if load.pop('key') != self.key[salt.utils.get_user()]: log.warning(", "keys[user] = key return keys def fileserver_update(fileserver): ''' Update the", "up the publication payload pub_load = { 'fun': load['fun'], 'arg':", "minion_publish(self, load): ''' Publish a command initiated from a minion,", "autosign directory. ''' autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign') # cleanup expired", "is specified ''' if not skip_verify: if 'id' not in", "enabled, fail log.warning('Authentication failure of type \"eauth\" occurred.') return ''", "= opts self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False)", "perms): return {} ret = {} if not salt.utils.verify.valid_id(self.opts, load['id']):", "parts = opts_dict['git'].strip().split() try: br = parts[0] loc = parts[1]", "Windows enforces this. os.chmod(keyfile, 0o600) if HAS_PWD: try: os.chown(keyfile, pwd.getpwnam(user).pw_uid,", "'user' in load: log.info( 'User {user} Published command {fun} with", "if 'ret_config' in load['kwargs']: pub_load['ret_config'] = load['kwargs'].get('ret_config') if 'metadata' in", "list of funcs/modules! 
if isinstance(self.opts['peer'][match], list): perms.extend(self.opts['peer'][match]) if ',' in", "msg = 'Failed to parse timeout value: {0}'.format( load['tmo']) log.warn(msg)", "Evaluate all configured master_tops interfaces opts = {} grains =", "jobid prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[prep_fstr](nocache=load.get('nocache', False)) # save", "to extract external pillar data: {0}' .format(opts_dict['git']) ) else: pillargitfs.append(", "tmpfname = tempfile.mkstemp(dir=cdir) os.close(tmpfh) with salt.utils.fopen(tmpfname, 'w+b') as fp_: fp_.write(", "function=load['fun'] ) ) return '' # to make sure we", "not os.path.exists(signing_file): return False if not self.check_permissions(signing_file): message = 'Wrong", "import salt.payload import salt.pillar import salt.state import salt.runner import salt.auth", "{}): continue try: ret.update(self.tops[fun](opts=opts, grains=grains)) except Exception as exc: #", "load['arg']: arg_.append(arg.split()) load['arg'] = arg_ good = self.ckminions.auth_check( perms, load['fun'],", "if not os.path.isdir(cdir): try: os.makedirs(cdir) except os.error: pass if os.path.isfile(cpath)", "log.warn( 'Minion id {0} is not who it says it", "only from the local system ''' # The ClearFuncs object", "filename): ''' Check if the specified filename has correct permissions", "move on log.error( 'Top function {0} failed with error {1}", "if 'git' in opts_dict: try: import git except ImportError: return", "exc)) data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(", "that won't have a negative impact. pub_load = { 'fun':", "specified keyid should automatically be rejected. ''' return self.check_signing_file( keyid,", "in funs_to_check: if re.match(module_re, fun): good = False break if", "= parts[0] loc = parts[1] except IndexError: log.critical( 'Unable to", "import salt.key import salt.fileserver import salt.utils.atomicfile import salt.utils.event import salt.utils.verify", "',' in load['fun']: # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']] load['fun']", "self.opts['nodegroups']: pub_load['tgt'] = self.opts['nodegroups'][load['tgt']] pub_load['expr_form_type'] = 'compound' else: return {}", "states=False, rend=False) self.__setup_fileserver() def __setup_fileserver(self): ''' Set the local file", "''' if not skip_verify: if any(key not in load for", "_file_recv(self, load): ''' Allows minions to send files to the", "a clean_old_jobs, call it fstr = '{0}.clean_old_jobs'.format(opts['master_job_cache']) if fstr in", "be passed in ''' try: if not fileserver.servers: log.error( 'No", "return True if self.check_autosign_dir(keyid): return True return False class RemoteFuncs(object):", "return {} if not isinstance(self.opts['peer_run'], dict): return {} if any(key", "((name in self.opts['external_auth'][extra['eauth']]) | ('*' in self.opts['external_auth'][extra['eauth']])): log.warning( 'Authentication failure", "the external job cache ' '\"{0}\" does not have a", "''' Clear out the ext pillar caches, used when the", "import re import time import stat import tempfile # Import", "load[loc] < 0') return False if len(load['data']) + load.get('loc', 0)", "log.error('ACL user {0} is not available'.format(user)) continue keyfile = os.path.join(", "a client self.local = salt.client.get_local_client(mopts=self.opts) # Make an minion checker", "saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[saveload_fstr](load['jid'], load) log.info('Got return from 
{id} for", "log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) try: fun = load.pop('fun') runner_client =", "# Make an Auth object self.loadauth = salt.auth.LoadAuth(opts) # Stand", "don't want to listen to it! log.warn( 'Minion id {0}", "msg = 'Failed to parse timeout value: {0}'.format( load['timeout']) log.warn(msg)", "load or 'data' not in load: return False if self.opts.get('minion_data_cache',", "keyid): ''' Checks if the specified keyid should automatically be", "return dict(error=dict(name='TokenAuthenticationError', message=msg)) jid = salt.utils.jid.gen_jid() fun = load.pop('fun') tag", "from the specified minions' mine ''' if not skip_verify: if", "authorized a minion to execute ''' # Verify that the", "self.opts['external_auth'][extra['eauth']] else self.opts['external_auth'][extra['eauth']]['*'], load['fun'], load['tgt'], load.get('tgt_type', 'glob')) if not good:", "self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, load['jid'])", "'' if load['user'] not in self.opts['client_acl']: log.warning( 'Authentication failure of", "self.opts['external_auth']: # The eauth system is not enabled, fail log.warning(", "file {0}: {1}' .format(env_cache, exc) ) file_lists_dir = os.path.join( opts['cachedir'],", "self.ckminions = salt.utils.minions.CkMinions(opts) # Create the tops dict for loading", "if isinstance(mine_data, dict): if mine_data.pop(load['fun'], False): with salt.utils.fopen(datap, 'w+b') as", "{'tag': tag, 'data': data} if 'eauth' not in load: msg", "will function cleanly if load['fun'] != 'saltutil.find_job': log.warning( 'Authentication failure", "it try: token = self.loadauth.get_tok(extra['token']) except Exception as exc: log.error(", "failure of type \"eauth\" occurred.' ) return '' if not", "git_pillar from salt.utils.event import tagify from salt.exceptions import SaltMasterError #", "are found if not self.opts.get('order_masters'): # Check for no minions", "a syndic minion return and format it to look like", "by the master. 
''' from __future__ import absolute_import # Import", "in opts['fileserver_backend']: env_cache = os.path.join( opts['cachedir'], '{0}fs'.format(backend), 'envs.p' ) if", "in os.walk(auth_cache): for auth_file in filenames: auth_file_path = os.path.join(dirpath, auth_file)", "{'grains': load['grains'], 'pillar': data}) ) # On Windows, os.rename will", "in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun'], load['tgt'], load.get('tgt_type', 'glob')) if not", "fp_: fp_.write(key) os.umask(cumask) # 600 octal: Read and write access", "# check if the cmd is blacklisted for module_re in", "is blacklisted for module_re in self.opts['client_acl_blacklist'].get('modules', []): # if this", "load: if load['tgt_type'].startswith('node'): if load['tgt'] in self.opts['nodegroups']: pub_load['tgt'] = self.opts['nodegroups'][load['tgt']]", "functions, so the minions can only publish allowed salt functions", "fnmatch.filter(file_lists_caches, '*.p'): cache_file = os.path.join(file_lists_dir, file_lists_cache) try: os.remove(cache_file) except OSError", "import salt.state import salt.runner import salt.auth import salt.wheel import salt.minion", "return False def check_signing_file(self, keyid, signing_file): ''' Check a keyid", "master, files are sent to the master file cache '''", "if 'events' in load: for event in load['events']: self.event.fire_event(event, event['tag'])", "load['kwargs'].get('ret_config') if 'metadata' in load['kwargs']: pub_load['metadata'] = load['kwargs'].get('metadata') if 'user'", "load.pop('fun') tag = tagify(jid, prefix='wheel') data = {'fun': \"wheel.{0}\".format(fun), 'jid':", "stale keyfile: {0}'.format(keyfile)) os.unlink(keyfile) key = salt.crypt.Crypticle.generate_key_string() cumask = os.umask(191)", "'compound_pillar_exact' checker = salt.utils.minions.CkMinions(self.opts) minions = checker.check_minions( load['tgt'], match_type, greedy=False", "keyid, signing_file): ''' Check a keyid for membership in a", "os.path.join(dirpath, token) with salt.utils.fopen(token_path) as token_file: token_data = serializer.loads(token_file.read()) if", "or self.opts.get('ext_job_cache'): return fstr = '{0}.update_endtime'.format(self.opts['master_job_cache']) if (self.opts.get('job_cache_store_endtime') and fstr", "This configuration will only allow the minion foo.example.com to execute", "uid or fmode.st_gid != gid: return True elif self.opts.get('permissive_pki_access', False)", "self.serial.load(fp_) if isinstance(mine_data, dict): if mine_data.pop(load['fun'], False): with salt.utils.fopen(datap, 'w+b')", "occurred while ' 'introspecting {0}: {1}'.format(fun, exc)) data['return'] = 'Exception", "of type \"token\" occurred. \\ Token could not be retrieved.')", "the minion publication will only work if it is enabled", "= set() for match in self.opts['peer_run']: if re.match(match, load['id']): #", "salt.fileserver.Fileserver(self.opts) self._serve_file = fs_.serve_file self._file_hash = fs_.file_hash self._file_list = fs_.file_list", "return {} else: auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not", "in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun']) if not good: msg =", "we don't step on anyone else's toes del good #", "master control function back to the runner system ''' if", "\"token\" occurred. 
\\ Token does not verify against eauth provider:", "Published command {fun} with jid {jid}'.format( **load ) ) pub_load['user']", "'{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[saveload_fstr](load['jid'], load) log.info('Got return from {id} for job {jid}'.format(**load))", "None)): return True if self.check_autosign_dir(keyid): return True return False class", "salt.utils.event import salt.utils.verify import salt.utils.minions import salt.utils.gzip_util import salt.utils.jid from", "event: self.event.fire_event(event['data'], tagify(event['tag'], base=load['pretag'])) else: self.event.fire_event(event, tagify(event['tag'], base=load['pretag'])) else: tag", "occurred when generating auth token: {0}'.format( exc ) ) return", "= key return keys def fileserver_update(fileserver): ''' Update the fileserver", "match in self.opts['peer']: if re.match(match, load['id']): # This is the", "'{2}'.format( fun, exc, load['id'] ) ) return ret def _mine_get(self,", "fire it on the master event interface ''' if 'id'", "in perms): return {} ret = {} if not salt.utils.verify.valid_id(self.opts,", "signing_file or not os.path.exists(signing_file): return False if not self.check_permissions(signing_file): message", "# old dup event self.event.fire_event(load, tagify([load['jid'], 'ret', load['id']], 'job')) self.event.fire_ret_load(load)", "'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False) # Make a client self.local", "log.error('Exception occurred while ' 'introspecting {0}: {1}'.format(fun, exc)) return dict(error=dict(name=exc.__class__.__name__,", "minions, it can only be used by the LocalClient. '''", "for user in acl_users: log.info( 'Preparing the {0} key for", "data['ret'] = data.pop('return') ret[minion['id']] = data else: ret[minion['id']] = minion['return']", "dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) except Exception as exc: log.error( 'Exception occurred", "' 'occurred.' ) return '' else: log.warning( 'Authentication failure of", "= normpath.replace('\\\\', '/') normpath = os.path.normpath(normpath) cpath = os.path.join( self.opts['cachedir'],", "= [] for match in self.opts['peer']: if re.match(match, load['id']): #", "'minions', load['id'], 'files', normpath) cdir = os.path.dirname(cpath) if not os.path.isdir(cdir):", "load['loc'] < 0: log.error('Invalid file pointer: load[loc] < 0') return", "exists, it needs to be written to again. Windows enforces", "perms.update(self.opts['mine_get'][match]) if not any(re.match(perm, load['fun']) for perm in perms): return", "Return the mine data ''' if not skip_verify: if 'id'", "Verify that the caller has root on master elif 'user'", "built fileserver object be passed in ''' try: if not", "good = True if not good: # The minion is", "= self.serial.load(fp_) if isinstance(new, dict): new.update(load['data']) load['data'] = new with", "base=load['pretag'])) else: self.event.fire_event(event, tagify(event['tag'], base=load['pretag'])) else: tag = load['tag'] self.event.fire_event(load,", "[] keys = {} acl_users = set(opts['client_acl'].keys()) if opts.get('user'): acl_users.add(opts['user'])", "def _ext_nodes(self, load, skip_verify=False): ''' Return the results from an", "def _mine(self, load, skip_verify=False): ''' Return the mine data '''", "log.warning( 'Authentication failure of type \"user\" ' 'occurred.' 
) return", ") log.debug('Published command details {0}'.format(pub_load)) return {'ret': { 'jid': load['jid'],", "for perm in perms: if re.match(perm, load['fun']): good = True", "load['tgt'], 'jid': load['jid'], 'ret': load['ret'], } if 'id' in extra:", "a client self.local = salt.client.get_local_client(mopts=self.opts) # Create the master minion", "'id')): return None # if we have a load, save", "= os.path.join(auth_cache, str(ret['jid'])) with salt.utils.fopen(jid_fn, 'w+') as fp_: fp_.write(load['id']) return", "try: import pwd HAS_PWD = True except ImportError: # pwd", "log.critical( 'Unable to file_lists cache file {0}: {1}' .format(cache_file, exc)", "else: tag = load['tag'] self.event.fire_event(load, tag) return True def _return(self,", ") self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data}", "in load: log.error('Received call for external nodes without an id')", "if load['fun'] != 'saltutil.find_job': log.warning( 'Authentication failure of type \"token\"", "in # the clear: # publish (The publish from the", "' 'introspecting {0}: {1}'.format(fun, exc)) data['return'] = 'Exception occurred in", "mminion.returners[fstr]() def access_keys(opts): ''' A key needs to be placed", "= fs_.file_hash self._file_list = fs_.file_list self._file_list_emptydirs = fs_.file_list_emptydirs self._dir_list =", "for module_re in self.opts['client_acl_blacklist'].get('modules', []): # if this is a", "from the master ''' serializer = salt.payload.Serial(opts) for (dirpath, dirnames,", "in extra: if extra['eauth'] not in self.opts['external_auth']: # The eauth", "to again. Windows enforces this. os.chmod(keyfile, 0o600) if HAS_PWD: try:", "load['id'] ) ) return {} # Prepare the runner object", "salt.utils.get_gid_list(user, include_default=False) except KeyError: log.error( 'Failed to determine groups for", "[] for match in self.opts['peer']: if re.match(match, load['id']): # This", "log.warning( 'Authentication failure of type \"token\" occurred.' ) return ''", "is the list of funcs/modules! 
if isinstance(self.opts['peer'][match], list): perms.extend(self.opts['peer'][match]) if", "user = \"{0}\"'.format(load['user'])) elif 'eauth' in extra: if extra['eauth'] not", "# Write access is necessary since on subsequent runs, if", "'serve files to minions' ) raise SaltMasterError('No fileserver backends available')", "Allow the minion to delete all of its own mine", "# The eauth system is not enabled, fail log.warning( 'Authentication", "is not enabled, fail msg = ('Authentication failure of type", "to request revocation of its own key ''' if 'id'", "= os.path.join(dirpath, auth_file) if not os.path.isfile(auth_file_path): continue if os.path.getmtime(auth_file_path) -", "not in self.opts.get('master_tops', {}): continue try: ret.update(self.tops[fun](opts=opts, grains=grains)) except Exception", "as fp_: mine_data = self.serial.load(fp_) if isinstance(mine_data, dict): if mine_data.pop(load['fun'],", "and even tiny # additions can have serious implications on", "root if load.get('key', 'invalid') == self.key.get('root'): load.pop('key') elif load.pop('key') !=", "self.event.fire_event(new_job_load, tagify([load['jid'], 'new'], 'job')) # Save the invocation information if", "rend=False) self.__setup_fileserver() def __setup_fileserver(self): ''' Set the local file objects", "self.ckminions.runner_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun']) if", "fileserver.update() except Exception as exc: log.error( 'Exception {0} occurred in", "False): if os.path.isfile(datap): with salt.utils.fopen(datap, 'rb') as fp_: new =", "an minion checker object self.ckminions = salt.utils.minions.CkMinions(opts) # Make an", "= salt.minion.MasterMinion( self.opts, states=False, rend=False) # Make a wheel object", "user = pwd.getpwnam(user).pw_name except KeyError: log.error('ACL user {0} is not", "= salt.auth.LoadAuth(opts) # Stand up the master Minion to access", "data self.mminion = salt.minion.MasterMinion( self.opts, states=False, rend=False) # Make a", "[], ['foo']] load['fun'] = load['fun'].split(',') arg_ = [] for arg", "# If the return data is invalid, just ignore it", "\"eauth\" occurred.' ) return '' if not self.loadauth.time_auth(extra): log.warning( 'Authentication", "user in pwd.getpwall(): users.append(user.pw_name) for user in acl_users: log.info( 'Preparing", "exc: log.critical( 'Unable to file_lists cache file {0}: {1}' .format(cache_file,", "exc) ) def clean_expired_tokens(opts): ''' Clean expired tokens from the", "as exc: log.error( 'Exception occurred when generating auth token: {0}'.format(", "{} if any(key not in load for key in ('fun',", "pwd.getpwnam(user).pw_name except KeyError: log.error('ACL user {0} is not available'.format(user)) continue", "system ''' # All wheel ops pass through eauth if", "permissions 0400 so clients are required to run as root.", "self.mminion.returners[fstr](load) def _syndic_return(self, load): ''' Receive a syndic minion return", "up a master server, this involves preparing the three listeners", "load.get('tgt_type', 'glob'), 'tgt': load['tgt'], 'ret': load['ret'], 'id': load['id'], } if", "fdata: ret[minion] = fdata except Exception: continue return ret def", "' 'error.\\n'.format( user=load['user'], function=load['fun'] ) ) return '' # to", "commands. 
# # In short, check with <NAME> before you", "False return True def _mine_flush(self, load, skip_verify=False): ''' Allow the", "os.walk(opts['token_dir']): for token in filenames: token_path = os.path.join(dirpath, token) with", "caches so they get recreated for backend in ('git', 'hg',", "on windows try: user = self.opts['user'] pwnam = pwd.getpwnam(user) uid", "loading external top data self.tops = salt.loader.tops(self.opts) # Make a", "configuration will only allow the minion foo.example.com to execute commands", "!= 'full': ret.pop('__jid__') return ret def revoke_auth(self, load): ''' Allow", "a save_load function!'.format( self.opts['master_job_cache'] ) ) except Exception: log.critical( 'The", "not fileserver.servers: log.error( 'No fileservers loaded, the master will not", "try: os.remove(datap) except OSError: return False return True def _file_recv(self,", "load['kwargs']: pub_load['ret_config'] = load['kwargs'].get('ret_config') if 'metadata' in load['kwargs']: pub_load['metadata'] =", "= 'compound' else: return {} else: pub_load['expr_form'] = load['tgt_type'] pub_load['raw']", "fmode.st_mode: return True elif stat.S_IWGRP & fmode.st_mode: return False #", "= '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load['load']) # Format individual return loads for", "master is not being run as root and can therefore", "in self.opts['external_auth'][extra['eauth']]) | ('*' in self.opts['external_auth'][extra['eauth']])): log.warning( 'Authentication failure of", "load['tgt_type'], 'tgt': load['tgt'], 'user': load['user'], 'fun': load['fun'], 'arg': load['arg'], 'minions':", "skip_verify=False): ''' Return the results from an external node classifier", "load.pop('key') elif load.pop('key') != self.key[self.opts.get('user', 'root')]: log.warning( 'Authentication failure of", "caller has root on master elif 'user' in load: if", "{0}: {1}' .format(env_cache, exc) ) file_lists_dir = os.path.join( opts['cachedir'], 'file_lists',", "& fmode.st_mode: return False # check if writable by group", "not load['id'] == fp_.read(): return {} return self.local.get_cache_returns(load['jid']) def minion_pub(self,", "self.local.cmd_iter(**pub_load): if load.get('form', '') == 'full': data = minion if", "'user': load.get('username', 'UNKNOWN')} try: self.event.fire_event(data, tagify([jid, 'new'], 'wheel')) ret =", "pub_load['tgt_type'] = load['tgt_type'] if 'to' in load: pub_load['to'] = load['to']", "pillargitfs = [] for opts_dict in [x for x in", "auth file') def clean_old_jobs(opts): ''' Clean out the old jobs", "files expire_minutes = self.opts.get('autosign_expire_minutes', 10) if expire_minutes > 0: min_time", "'full': data = minion if 'jid' in minion: ret['__jid__'] =", "load.get('username', 'UNKNOWN')} try: self.event.fire_event(data, tagify([jid, 'new'], 'wheel')) ret = self.wheel_.call_func(fun,", "the permissions for this minion perms = [] for match", "[] for arg in load['arg']: arg_.append(arg.split()) load['arg'] = arg_ good", "((name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']])): msg = ('Authentication", "master files!! return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False", "if name in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun']) if not good:", "'Authentication failure of type \"eauth\" occurred.' 
) return '' try:", "serializer.loads(token_file.read()) if 'expire' not in token_data or token_data.get('expire', 0) <", "else: pillargitfs.append( git_pillar.GitPillar( br, loc, opts ) ) return pillargitfs", "be retrieved.') return '' if token['eauth'] not in self.opts['external_auth']: log.warning('Authentication", "| ('*' in self.opts['external_auth'][load['eauth']]): msg = ('Authentication failure of type", "fail log.warning( 'Authentication failure of type \"eauth\" occurred.' ) return", "keys = {} acl_users = set(opts['client_acl'].keys()) if opts.get('user'): acl_users.add(opts['user']) acl_users.add(salt.utils.get_user())", "other if not (stat.S_IWGRP & fmode.st_mode or stat.S_IWOTH & fmode.st_mode):", "double backslashes are normalized normpath = normpath.replace('\\\\', '/') normpath =", "for minion in self.local.cmd_iter(**pub_load): if load.get('form', '') == 'full': data", "= \"{0}\"'.format(load['user'])) elif 'eauth' in extra: if extra['eauth'] not in", "in normpath: # make sure double backslashes are normalized normpath", "'' else: if load.pop('key') != self.key[salt.utils.get_user()]: log.warning( 'Authentication failure of", "= os.path.join( opts['cachedir'], '{0}fs'.format(backend), 'envs.p' ) if os.path.isfile(env_cache): log.debug('Clearing {0}fs", "the performance of the # publish commands. # # In", "to ' 'serve files to minions' ) raise SaltMasterError('No fileserver", "fp_.write(self.serial.dumps(mine_data)) except OSError: return False return True def _mine_flush(self, load,", "the token is invalid ''' if 'token' not in load:", "' 'user {0}.').format(token['name']) log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) try: fun =", "include_default=False) except KeyError: log.error( 'Failed to determine groups for user", "any(key not in load for key in ('id', 'path', 'loc')):", "correct permissions ''' if salt.utils.is_windows(): return True # After we've", "return False if 'events' not in load and ('tag' not", "three listeners and the workers needed by the master. '''", "normpath.replace('\\\\', '/') normpath = os.path.normpath(normpath) cpath = os.path.join( self.opts['cachedir'], 'minions',", "if os.path.getmtime(auth_file_path) - time.time() > opts['keep_jobs']: os.remove(auth_file_path) except (IOError, OSError):", "normalized normpath = normpath.replace('\\\\', '/') normpath = os.path.normpath(normpath) cpath =", "or os.path.isabs(load['path']): return False if os.path.isabs(load['path']) or '../' in load['path']:", "command details {0}'.format(pub_load)) return {'ret': { 'jid': load['jid'], 'minions': minions", "find_job so the CLI will function cleanly if load['fun'] !=", "do what you want to do another # way that", ") ) return {} # Prepare the runner object opts", "os.path.exists(stub_file): return False os.remove(stub_file) return True def check_autoreject(self, keyid): '''", "dup event self.event.fire_event(new_job_load, tagify([load['jid'], 'new'], 'job')) # Save the invocation", "in self.opts['external_auth']: msg = 'Authentication failure of type \"token\" occurred.'", "filenames: token_path = os.path.join(dirpath, token) with salt.utils.fopen(token_path) as token_file: token_data", "'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: fun =", "of type \"user\" occurred.' 
) return '' else: if load.pop('key')", "Return the name associated with a token or False if", "else: if load.pop('key') != self.key[salt.utils.get_user()]: log.warning( 'Authentication failure of type", "key in ('return', 'jid', 'id')): return False if load['jid'] ==", "False)) self.event.fire_event({'minions': minions}, load['jid']) new_job_load = { 'jid': load['jid'], 'tgt_type':", "return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: name = self.loadauth.load_name(load) if not ((name", "salt.payload import salt.pillar import salt.state import salt.runner import salt.auth import", "it says it is!'.format( load['id'] ) ) return {} #", "self.event.fire_event(data, tagify([jid, 'new'], 'wheel')) ret = self.wheel_.call_func(fun, **load) data['return'] =", "user is blacklisted for user_re in self.opts['client_acl_blacklist'].get('users', []): if re.match(user_re,", "valid minion ids def init_git_pillar(opts): ''' Clear out the ext", "except (IOError, OSError): log.error('Unable to delete pub auth file') def", "# Create the event manager self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'],", "load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.async( fun, load.get('kwarg', {}), token['name'])", "if os.path.isfile(datap): with salt.utils.fopen(datap, 'rb') as fp_: new = self.serial.load(fp_)", "load['id']): # This is the list of funcs/modules! if isinstance(self.opts['peer_run'][match],", "load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.async(fun, load.get('kwarg', {}), load.get('username', 'UNKNOWN'))", "wheel {0}: {1}: {2}'.format( fun, exc.__class__.__name__, exc, ) self.event.fire_event(data, tagify([jid,", "run if re.match('publish.*', load['fun']): return False # Check the permissions", "return '' if not self.loadauth.time_auth(load): log.warning('Authentication failure of type \"eauth\"", "for the master job cache ' '\"{0}\" does not have", "load): ''' Verify that the passed information authorized a minion", "'tgt_type' in load: pub_load['tgt_type'] = load['tgt_type'] if 'to' in load:", "user = self.opts['user'] pwnam = pwd.getpwnam(user) uid = pwnam[2] gid", "os.listdir(file_lists_dir) except OSError: continue for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'): cache_file", "self.opts.get('permissive_pki_access', False) \\ and fmode.st_gid in groups: return True else:", "The minion is returning a standalone job, request a jobid", "= load['kwargs'].get('ret_config') if 'metadata' in load['kwargs']: pub_load['metadata'] = load['kwargs'].get('metadata') if", "pwd is not available on windows HAS_PWD = False log", "impact. 
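# The helpers above are independent entry points; the function below is a
# minimal illustrative sketch (an assumption added for clarity, not recovered
# source) of how a master-side maintenance pass might chain them together.
def _example_maintenance_pass():
    '''
    Illustrative sketch only. Assumes the default master config path;
    adjust for a real deployment.
    '''
    import salt.config

    opts = salt.config.master_config('/etc/salt/master')  # hypothetical path
    clean_expired_tokens(opts)   # drop eauth tokens past their 'expire' time
    clean_pub_auth(opts)         # prune peer publish_auth files via keep_jobs
    clean_old_jobs(opts)         # delegate to the master_job_cache returner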
class AutoKey(object):
    '''
    Implement the methods to run auto key acceptance and rejection
    '''
    def __init__(self, opts):
        self.opts = opts

    def check_permissions(self, filename):
        '''
        Check if the specified filename has correct permissions
        '''
        if salt.utils.is_windows():
            return True

        # After we've ascertained we're not on windows
        try:
            user = self.opts['user']
            pwnam = pwd.getpwnam(user)
            uid = pwnam[2]
            gid = pwnam[3]
            groups = salt.utils.get_gid_list(user, include_default=False)
        except KeyError:
            log.error(
                'Failed to determine groups for user {0}. The user is not '
                'available.\n'.format(user)
            )
            return False

        fmode = os.stat(filename)

        if os.getuid() == 0:
            if fmode.st_uid == uid or fmode.st_gid != gid:
                return True
            elif self.opts.get('permissive_pki_access', False) \
                    and fmode.st_gid in groups:
                return True
        else:
            if stat.S_IWOTH & fmode.st_mode:
                # don't allow others to write to the file
                return False

            # check group flags
            if self.opts.get('permissive_pki_access', False) \
                    and stat.S_IWGRP & fmode.st_mode:
                return True
            elif stat.S_IWGRP & fmode.st_mode:
                return False

            # check if writable by group or other
            if not (stat.S_IWGRP & fmode.st_mode or
                    stat.S_IWOTH & fmode.st_mode):
                return True

        return False

    def check_signing_file(self, keyid, signing_file):
        '''
        Check a keyid for membership in a signing file
        '''
        if not signing_file or not os.path.exists(signing_file):
            return False

        if not self.check_permissions(signing_file):
            message = 'Wrong permissions for {0}, ignoring content'
            log.warn(message.format(signing_file))
            return False

        with salt.utils.fopen(signing_file, 'r') as fp_:
            for line in fp_:
                line = line.strip()
                if line.startswith('#'):
                    continue
                else:
                    if salt.utils.expr_match(keyid, line):
                        return True
        return False

    def check_autosign_dir(self, keyid):
        '''
        Check a keyid for membership in an autosign directory.
        '''
        autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign')

        # cleanup expired files
        expire_minutes = self.opts.get('autosign_expire_minutes', 10)
        if expire_minutes > 0:
            min_time = time.time() - (60 * int(expire_minutes))
            for root, dirs, filenames in os.walk(autosign_dir):
                for f in filenames:
                    stub_file = os.path.join(autosign_dir, f)
                    mtime = os.path.getmtime(stub_file)
                    if mtime < min_time:
                        log.warn('Autosign keyid expired {0}'.format(stub_file))
                        os.remove(stub_file)

        stub_file = os.path.join(autosign_dir, keyid)
        if not os.path.exists(stub_file):
            return False
        os.remove(stub_file)
        return True

    def check_autoreject(self, keyid):
        '''
        Checks if the specified keyid should be automatically rejected.
        '''
        return self.check_signing_file(
            keyid,
            self.opts.get('autoreject_file', None)
        )

    def check_autosign(self, keyid):
        '''
        Checks if the specified keyid should automatically be signed.
        '''
        if self.opts['auto_accept']:
            return True
        if self.check_signing_file(keyid, self.opts.get('autosign_file', None)):
            return True
        if self.check_autosign_dir(keyid):
            return True
        return False
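# A minimal sketch of how the checks above compose during key acceptance.
# The opts values are hypothetical stand-ins, not recovered configuration;
# a real master passes its full opts dict.
def _example_autokey_decision(keyid):
    '''
    Illustrative sketch only: returns 'reject', 'accept' or 'pend' for a
    key id under a minimal, assumed configuration.
    '''
    opts = {
        'user': 'root',                                   # assumption
        'pki_dir': '/etc/salt/pki/master',                # assumption
        'auto_accept': False,
        'autosign_file': '/etc/salt/autosign.conf',       # assumption
        'autoreject_file': '/etc/salt/autoreject.conf',   # assumption
    }
    autokey = AutoKey(opts)
    if autokey.check_autoreject(keyid):    # the reject list wins first
        return 'reject'
    if autokey.check_autosign(keyid):      # then auto_accept/autosign paths
        return 'accept'
    return 'pend'                          # otherwise wait for an operator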
class RemoteFuncs(object):
    '''
    Functions made available to minions, this class includes the raw routines
    post validation that make up the minion access to the master
    '''
    def __init__(self, opts):
        self.opts = opts
        self.event = salt.utils.event.get_event(
            'master',
            self.opts['sock_dir'],
            self.opts['transport'],
            opts=self.opts,
            listen=False)
        self.serial = salt.payload.Serial(opts)
        self.ckminions = salt.utils.minions.CkMinions(opts)
        # Create the tops dict for loading external top data
        self.tops = salt.loader.tops(self.opts)
        # Make a client
        self.local = salt.client.get_local_client(mopts=self.opts)
        # Create the master minion to access the external job cache
        self.mminion = salt.minion.MasterMinion(
            self.opts,
            states=False,
            rend=False)
        self.__setup_fileserver()

    def __setup_fileserver(self):
        '''
        Set the local file objects from the file server interface
        '''
        fs_ = salt.fileserver.Fileserver(self.opts)
        self._serve_file = fs_.serve_file
        self._file_hash = fs_.file_hash
        self._file_list = fs_.file_list
        self._file_list_emptydirs = fs_.file_list_emptydirs
        self._dir_list = fs_.dir_list
        self._symlink_list = fs_.symlink_list
        self._file_envs = fs_.envs

    def __verify_minion_publish(self, load):
        '''
        Verify that the passed information authorized a minion to execute
        '''
        # Verify that the load is valid
        if 'peer' not in self.opts:
            return False
        if not isinstance(self.opts['peer'], dict):
            return False
        if any(key not in load for key in ('fun', 'arg', 'tgt', 'ret', 'id')):
            return False
        # If the command will make a recursive publish don't run
        if re.match('publish.*', load['fun']):
            return False
        # Check the permissions for this minion
        perms = []
        for match in self.opts['peer']:
            if re.match(match, load['id']):
                # This is the list of funcs/modules!
                if isinstance(self.opts['peer'][match], list):
                    perms.extend(self.opts['peer'][match])
        if ',' in load['fun']:
            # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
            load['fun'] = load['fun'].split(',')
            arg_ = []
            for arg in load['arg']:
                arg_.append(arg.split())
            load['arg'] = arg_
        good = self.ckminions.auth_check(
            perms,
            load['fun'],
            load['tgt'],
            load.get('tgt_type', 'glob'),
            publish_validate=True)
        if not good:
            return False
        return True

    def _ext_nodes(self, load, skip_verify=False):
        '''
        Return the results from an external node classifier if one is
        specified
        '''
        if not skip_verify:
            if 'id' not in load:
                log.error('Received call for external nodes without an id')
                return {}
            if not salt.utils.verify.valid_id(self.opts, load['id']):
                return {}
        # Evaluate all configured master_tops interfaces
        opts = {}
        grains = {}
        ret = {}

        if 'opts' in load:
            opts = load['opts']
            if 'grains' in load['opts']:
                grains = load['opts']['grains']
        for fun in self.tops:
            if fun not in self.opts.get('master_tops', {}):
                continue
            try:
                ret.update(self.tops[fun](opts=opts, grains=grains))
            except Exception as exc:
                # If anything happens in the top generation, log it and move on
                log.error(
                    'Top function {0} failed with error {1} for minion '
                    '{2}'.format(fun, exc, load['id'])
                )
        return ret

    def _master_opts(self, load):
        '''
        Return the master options to the minion
        '''
        mopts = {}
        file_roots = {}
        envs = self._file_envs()
        for saltenv in envs:
            if saltenv not in file_roots:
                file_roots[saltenv] = []
        mopts['file_roots'] = file_roots
        if load.get('env_only'):
            return mopts
        mopts['renderer'] = self.opts['renderer']
        mopts['failhard'] = self.opts['failhard']
        mopts['state_top'] = self.opts['state_top']
        mopts['nodegroups'] = self.opts['nodegroups']
        mopts['state_auto_order'] = self.opts['state_auto_order']
        mopts['state_events'] = self.opts['state_events']
        mopts['state_aggregate'] = self.opts['state_aggregate']
        mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
        mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
        return mopts

    def _mine_get(self, load, skip_verify=False):
        '''
        Gathers the data from the specified minions' mine
        '''
        if not skip_verify:
            if any(key not in load for key in ('id', 'tgt', 'fun')):
                return {}
        if 'mine_get' in self.opts:
            # If master side acl defined.
            if not isinstance(self.opts['mine_get'], dict):
                return {}
            perms = set()
            for match in self.opts['mine_get']:
                if re.match(match, load['id']):
                    if isinstance(self.opts['mine_get'][match], list):
                        perms.update(self.opts['mine_get'][match])
            good = False
            for perm in perms:
                if re.match(perm, load['fun']):
                    good = True
            if not good:
                return {}
        ret = {}
        if not salt.utils.verify.valid_id(self.opts, load['id']):
            return ret
        match_type = load.get('expr_form', 'glob')
        if match_type.lower() == 'pillar':
            match_type = 'pillar_exact'
        if match_type.lower() == 'compound':
            match_type = 'compound_pillar_exact'
        checker = salt.utils.minions.CkMinions(self.opts)
        minions = checker.check_minions(
            load['tgt'],
            match_type,
            greedy=False)
        for minion in minions:
            mine = os.path.join(
                self.opts['cachedir'], 'minions', minion, 'mine.p')
            try:
                with salt.utils.fopen(mine, 'rb') as fp_:
                    fdata = self.serial.load(fp_).get(load['fun'])
                    if fdata:
                        ret[minion] = fdata
            except Exception:
                continue
        return ret

    def _mine(self, load, skip_verify=False):
        '''
        Return the mine data
        '''
        if not skip_verify:
            if 'id' not in load or 'data' not in load:
                return False
        if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
            cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
            if not os.path.isdir(cdir):
                return False
            datap = os.path.join(cdir, 'mine.p')
            if not load.get('clear', False):
                if os.path.isfile(datap):
                    with salt.utils.fopen(datap, 'rb') as fp_:
                        new = self.serial.load(fp_)
                        if isinstance(new, dict):
                            new.update(load['data'])
                            load['data'] = new
            try:
                with salt.utils.fopen(datap, 'w+b') as fp_:
                    fp_.write(self.serial.dumps(load['data']))
            except OSError:
                return False
        return True

    def _mine_delete(self, load):
        '''
        Allow the minion to delete a specific function from its own mine
        '''
        if 'id' not in load or 'fun' not in load:
            return False
        if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
            cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
            if not os.path.isdir(cdir):
                return False
            datap = os.path.join(cdir, 'mine.p')
            if os.path.isfile(datap):
                try:
                    with salt.utils.fopen(datap, 'rb') as fp_:
                        mine_data = self.serial.load(fp_)
                    if isinstance(mine_data, dict):
                        if mine_data.pop(load['fun'], False):
                            with salt.utils.fopen(datap, 'w+b') as fp_:
                                fp_.write(self.serial.dumps(mine_data))
                except OSError:
                    return False
        return True

    def _mine_flush(self, load, skip_verify=False):
        '''
        Allow the minion to delete all of its own mine contents
        '''
        if not skip_verify and 'id' not in load:
            return False
        if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
            cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
            if not os.path.isdir(cdir):
                return False
            datap = os.path.join(cdir, 'mine.p')
            if os.path.isfile(datap):
                try:
                    os.remove(datap)
                except OSError:
                    return False
        return True

    def _file_recv(self, load):
        '''
        Allows minions to send files to the master, files are sent to the
        master file cache
        '''
        if any(key not in load for key in ('id', 'path', 'loc')):
            return False
        if not self.opts['file_recv'] or os.path.isabs(load['path']):
            return False
        if os.path.isabs(load['path']) or '../' in load['path']:
            # Can overwrite master files!!
            return False
        if not salt.utils.verify.valid_id(self.opts, load['id']):
            return False
        file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size']

        if 'loc' in load and load['loc'] < 0:
            log.error('Invalid file pointer: load[loc] < 0')
            return False

        if len(load['data']) + load.get('loc', 0) > file_recv_max_size:
            log.error(
                'Exceeding file_recv_max_size limit: {0}'.format(
                    file_recv_max_size))
            return False

        # Normalize Windows paths
        normpath = load['path']
        if ':' in normpath:
            # make sure double backslashes are normalized
            normpath = normpath.replace('\\\\', '/')
            normpath = os.path.normpath(normpath)
        cpath = os.path.join(
            self.opts['cachedir'],
            'minions',
            load['id'],
            'files',
            normpath)
        cdir = os.path.dirname(cpath)
        if not os.path.isdir(cdir):
            try:
                os.makedirs(cdir)
            except os.error:
                pass
        if os.path.isfile(cpath) and load['loc'] != 0:
            mode = 'ab'
        else:
            mode = 'wb'
        with salt.utils.fopen(cpath, mode) as fp_:
            if load['loc']:
                fp_.seek(load['loc'])
            fp_.write(load['data'])
        return True

    def _pillar(self, load):
        '''
        Return the pillar data for the minion
        '''
        if any(key not in load for key in ('id', 'grains')):
            return False
        pillar = salt.pillar.Pillar(
            self.opts,
            load['grains'],
            load['id'],
            load.get('saltenv', load.get('env')),
            load.get('ext'),
            self.mminion.functions,
            pillar=load.get('pillar_override', {}))
        pillar_dirs = {}
        data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
        if self.opts.get('minion_data_cache', False):
            cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
            if not os.path.isdir(cdir):
                os.makedirs(cdir)
            datap = os.path.join(cdir, 'data.p')
            tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
            os.close(tmpfh)
            with salt.utils.fopen(tmpfname, 'w+b') as fp_:
                fp_.write(
                    self.serial.dumps(
                        {'grains': load['grains'],
                         'pillar': data})
                )
            # On Windows, os.rename will fail if the destination file exists.
            salt.utils.atomicfile.atomic_rename(tmpfname, datap)
        return data

    def _minion_event(self, load):
        '''
        Receive an event from the minion and fire it on the master event
        interface
        '''
        if 'id' not in load:
            return False
        if 'events' not in load and ('tag' not in load or 'data' not in load):
            return False
        if 'events' in load:
            for event in load['events']:
                self.event.fire_event(event, event['tag'])  # old dup event
                if load.get('pretag') is not None:
                    if 'data' in event:
                        self.event.fire_event(
                            event['data'],
                            tagify(event['tag'], base=load['pretag']))
                    else:
                        self.event.fire_event(
                            event,
                            tagify(event['tag'], base=load['pretag']))
        else:
            tag = load['tag']
            self.event.fire_event(load, tag)
        return True

    def _return(self, load):
        '''
        Handle the return data sent from the minions
        '''
        # Generate EndTime
        endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid())
        # If the return data is invalid, just ignore it
        if any(key not in load for key in ('return', 'jid', 'id')):
            return False
        if load['jid'] == 'req':
            # The minion is returning a standalone job, request a jobid
            prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
            load['jid'] = self.mminion.returners[prep_fstr](
                nocache=load.get('nocache', False))

            # save the load, since we don't have it
            saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
            self.mminion.returners[saveload_fstr](load['jid'], load)
        log.info('Got return from {id} for job {jid}'.format(**load))
        self.event.fire_event(load, load['jid'])  # old dup event
        self.event.fire_event(load, tagify([load['jid'], 'ret', load['id']], 'job'))
        self.event.fire_ret_load(load)
        if self.opts['master_ext_job_cache']:
            fstr = '{0}.returner'.format(self.opts['master_ext_job_cache'])
            self.mminion.returners[fstr](load)
            return

        fstr = '{0}.update_endtime'.format(self.opts['master_job_cache'])
        if (self.opts.get('job_cache_store_endtime')
                and fstr in self.mminion.returners):
            self.mminion.returners[fstr](load['jid'], endtime)

        fstr = '{0}.returner'.format(self.opts['master_job_cache'])
        self.mminion.returners[fstr](load)

    def _syndic_return(self, load):
        '''
        Receive a syndic minion return and format it to look like returns from
        individual minions
        '''
        # Verify the load
        if any(key not in load for key in ('return', 'jid', 'id')):
            return None
        # if we have a load, save it
        if 'load' in load:
            fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
            self.mminion.returners[fstr](load['jid'], load['load'])

        # Format individual return loads
        for key, item in six.iteritems(load['return']):
            ret = {'jid': load['jid'],
                   'id': key,
                   'return': item}
            if 'out' in load:
                ret['out'] = load['out']
            self._return(ret)

    def minion_runner(self, load):
        '''
        Execute a runner from a minion, return the runner's function data
        '''
        if 'peer_run' not in self.opts:
            return {}
        if not isinstance(self.opts['peer_run'], dict):
            return {}
        if any(key not in load for key in ('fun', 'arg', 'id')):
            return {}
        perms = []
        for match in self.opts['peer_run']:
            if re.match(match, load['id']):
                # This is the list of funcs/modules!
                if isinstance(self.opts['peer_run'][match], list):
                    perms.extend(self.opts['peer_run'][match])
        good = False
        for perm in perms:
            if re.match(perm, load['fun']):
                good = True
        if not good:
            # The minion is not who it says it is!
            # We don't want to listen to it!
            log.warn(
                'Minion id {0} is not who it says it is!'.format(load['id']))
            return {}
        # Prepare the runner object
        opts = {'fun': load['fun'],
                'arg': load['arg'],
                'id': load['id'],
                'doc': False,
                'conf_file': self.opts['conf_file']}
        opts.update(self.opts)
        runner = salt.runner.Runner(opts)
        return runner.run()

    def pub_ret(self, load, skip_verify=False):
        '''
        Request the return data from a specific jid, only allowed if the
        requesting minion also initiated the execution.
        '''
        if not skip_verify:
            if any(key not in load for key in ('jid', 'id')):
                return {}
        # Check that this minion initiated the publication
        auth_cache = os.path.join(self.opts['cachedir'], 'publish_auth')
        if not os.path.isdir(auth_cache):
            os.makedirs(auth_cache)
        jid_fn = os.path.join(auth_cache, load['jid'])
        with salt.utils.fopen(jid_fn, 'r') as fp_:
            if not load['id'] == fp_.read():
                return {}
        return self.local.get_cache_returns(load['jid'])

    def minion_pub(self, load):
        '''
        Publish a command initiated from a minion, this method executes minion
        restrictions so that the minion publication will only work if it is
        enabled in the config.

        The configuration on the master allows minions to be matched to
        salt functions, so the minions can only publish allowed salt functions

        The config will look like this:

        peer:
          .*:
            - .*

        This configuration will enable all minions to execute all commands.

        peer:
          foo.example.com:
            - test.*

        This configuration will only allow the minion foo.example.com to
        execute commands from the test module
        '''
        if not self.__verify_minion_publish(load):
            return {}
        # Set up the publication payload
        pub_load = {
            'fun': load['fun'],
            'arg': load['arg'],
            'expr_form': load.get('tgt_type', 'glob'),
            'tgt': load['tgt'],
            'ret': load['ret'],
            'id': load['id'],
        }
        if 'tmo' in load:
            try:
                pub_load['timeout'] = int(load['tmo'])
            except ValueError:
                msg = 'Failed to parse timeout value: {0}'.format(load['tmo'])
                log.warn(msg)
                return {}
        if 'timeout' in load:
            try:
                pub_load['timeout'] = int(load['timeout'])
            except ValueError:
                msg = 'Failed to parse timeout value: {0}'.format(load['timeout'])
                log.warn(msg)
                return {}
        if 'tgt_type' in load:
            if load['tgt_type'].startswith('node'):
                if load['tgt'] in self.opts['nodegroups']:
                    pub_load['tgt'] = self.opts['nodegroups'][load['tgt']]
                    pub_load['expr_form_type'] = 'compound'
                    pub_load['expr_form'] = load['tgt_type']
                else:
                    return {}
            else:
                pub_load['expr_form'] = load['tgt_type']
        ret = {}
        ret['jid'] = self.local.cmd_async(**pub_load)
        ret['minions'] = self.ckminions.check_minions(
            load['tgt'], pub_load['expr_form'])
        auth_cache = os.path.join(self.opts['cachedir'], 'publish_auth')
        if not os.path.isdir(auth_cache):
            os.makedirs(auth_cache)
        jid_fn = os.path.join(auth_cache, str(ret['jid']))
        with salt.utils.fopen(jid_fn, 'w+') as fp_:
            fp_.write(load['id'])
        return ret

    def minion_publish(self, load):
        '''
        Publish a command initiated from a minion, this method executes minion
        restrictions so that the minion publication will only work if it is
        enabled in the config. This is the blocking counterpart of minion_pub:
        it gathers and returns the results itself.
        '''
        if not self.__verify_minion_publish(load):
            return {}
        # Set up the publication payload
        pub_load = {
            'fun': load['fun'],
            'arg': load['arg'],
            'expr_form': load.get('tgt_type', 'glob'),
            'tgt': load['tgt'],
            'ret': load['ret'],
            'id': load['id'],
        }
        if 'tmo' in load:
            try:
                pub_load['timeout'] = int(load['tmo'])
            except ValueError:
                msg = 'Failed to parse timeout value: {0}'.format(load['tmo'])
                log.warn(msg)
                return {}
        if 'timeout' in load:
            try:
                pub_load['timeout'] = int(load['timeout'])
            except ValueError:
                msg = 'Failed to parse timeout value: {0}'.format(load['timeout'])
                log.warn(msg)
                return {}
        if 'tgt_type' in load:
            if load['tgt_type'].startswith('node'):
                if load['tgt'] in self.opts['nodegroups']:
                    pub_load['tgt'] = self.opts['nodegroups'][load['tgt']]
                    pub_load['expr_form_type'] = 'compound'
                else:
                    return {}
            else:
                pub_load['expr_form'] = load['tgt_type']
        pub_load['raw'] = True
        ret = {}
        for minion in self.local.cmd_iter(**pub_load):
            if load.get('form', '') == 'full':
                data = minion
                if 'jid' in minion:
                    ret['__jid__'] = minion['jid']
                data['ret'] = data.pop('return')
                ret[minion['id']] = data
            else:
                ret[minion['id']] = minion['return']
                if 'jid' in minion:
                    ret['__jid__'] = minion['jid']
        for key, val in six.iteritems(self.local.get_cache_returns(ret['__jid__'])):
            if key not in ret:
                ret[key] = val
        if load.get('form', '') != 'full':
            ret.pop('__jid__')
        return ret

    def revoke_auth(self, load):
        '''
        Allow a minion to request revocation of its own key
        '''
        if 'id' not in load:
            return False
        keyapi = salt.key.Key(self.opts)
        keyapi.delete_key(load['id'],
                          preserve_minions=load.get('preserve_minion_cache',
                                                    False))
        return True
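# A minimal sketch, assuming only the cache layout that _mine_get() walks
# above (<cachedir>/minions/<id>/mine.p, written by _mine() via salt.payload).
# The function name and arguments below are hypothetical additions.
def _example_read_mine(cachedir, minion_id, fun):
    '''
    Illustrative sketch only: read one function's cached mine data for one
    minion directly from the files _mine_get() consults.
    '''
    serial = salt.payload.Serial({})  # defaults to the msgpack serializer
    mine_p = os.path.join(cachedir, 'minions', minion_id, 'mine.p')
    if not os.path.isfile(mine_p):
        return None
    with salt.utils.fopen(mine_p, 'rb') as fp_:
        mine_data = serial.load(fp_)
    # mine.p holds a dict of {function_name: returned_data}
    if isinstance(mine_data, dict):
        return mine_data.get(fun)
    return None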
class LocalFuncs(object):
    '''
    Set up methods for use only from the local system
    '''
    # The ClearFuncs object encapsulates the functions that can be executed in
    # the clear:
    # publish (The publish from the LocalClient)
    # _auth
    def __init__(self, opts, key):
        self.opts = opts
        self.serial = salt.payload.Serial(opts)
        self.key = key
        # Create the event manager
        self.event = salt.utils.event.get_event(
            'master',
            self.opts['sock_dir'],
            self.opts['transport'],
            opts=self.opts,
            listen=False)
        # Make a client
        self.local = salt.client.get_local_client(mopts=self.opts)
        # Make a minion checker object
        self.ckminions = salt.utils.minions.CkMinions(opts)
        # Make an Auth object
        self.loadauth = salt.auth.LoadAuth(opts)
        # Stand up the master Minion to access returner data
        self.mminion = salt.minion.MasterMinion(
            self.opts,
            states=False,
            rend=False)
        # Make a wheel object
        self.wheel_ = salt.wheel.Wheel(opts)

    def runner(self, load):
        '''
        Send a master control function back to the runner system
        '''
        if 'token' in load:
            try:
                token = self.loadauth.get_tok(load['token'])
            except Exception as exc:
                msg = 'Exception occurred when generating auth token: {0}'.format(exc)
                log.error(msg)
                return dict(error=dict(name='TokenAuthenticationError',
                                       message=msg))
            if not token:
                msg = 'Authentication failure of type "token" occurred.'
                log.warning(msg)
                return dict(error=dict(name='TokenAuthenticationError',
                                       message=msg))
            if token['eauth'] not in self.opts['external_auth']:
                msg = 'Authentication failure of type "token" occurred.'
                log.warning(msg)
                return dict(error=dict(name='TokenAuthenticationError',
                                       message=msg))
            good = self.ckminions.runner_check(
                self.opts['external_auth'][token['eauth']][token['name']]
                if token['name'] in self.opts['external_auth'][token['eauth']]
                else self.opts['external_auth'][token['eauth']]['*'],
                load['fun'])
            if not good:
                msg = ('Authentication failure of type "token" occurred for '
                       'user {0}.').format(token['name'])
                log.warning(msg)
                return dict(error=dict(name='TokenAuthenticationError',
                                       message=msg))
            try:
                fun = load.pop('fun')
                runner_client = salt.runner.RunnerClient(self.opts)
                return runner_client.async(
                    fun,
                    load.get('kwarg', {}),
                    token['name'])
            except Exception as exc:
                log.error('Exception occurred while '
                          'introspecting {0}: {1}'.format(fun, exc))
                return dict(error=dict(name=exc.__class__.__name__,
                                       args=exc.args,
                                       message=str(exc)))

        if 'eauth' not in load:
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return dict(error=dict(name='EauthAuthenticationError',
                                   message=msg))
        if load['eauth'] not in self.opts['external_auth']:
            # The eauth system is not enabled, fail
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return dict(error=dict(name='EauthAuthenticationError',
                                   message=msg))

        try:
            name = self.loadauth.load_name(load)
            if not ((name in self.opts['external_auth'][load['eauth']]) |
                    ('*' in self.opts['external_auth'][load['eauth']])):
                msg = ('Authentication failure of type "eauth" occurred for '
                       'user {0}.').format(load.get('username', 'UNKNOWN'))
                log.warning(msg)
                return dict(error=dict(name='EauthAuthenticationError',
                                       message=msg))
            if not self.loadauth.time_auth(load):
                msg = ('Authentication failure of type "eauth" occurred for '
                       'user {0}.').format(load.get('username', 'UNKNOWN'))
                log.warning(msg)
                return dict(error=dict(name='EauthAuthenticationError',
                                       message=msg))
            good = self.ckminions.runner_check(
                self.opts['external_auth'][load['eauth']][name]
                if name in self.opts['external_auth'][load['eauth']]
                else self.opts['external_auth'][load['eauth']]['*'],
                load['fun'])
            if not good:
                msg = ('Authentication failure of type "eauth" occurred for '
                       'user {0}.').format(load.get('username', 'UNKNOWN'))
                log.warning(msg)
                return dict(error=dict(name='EauthAuthenticationError',
                                       message=msg))

            try:
                fun = load.pop('fun')
                runner_client = salt.runner.RunnerClient(self.opts)
                return runner_client.async(
                    fun,
                    load.get('kwarg', {}),
                    load.get('username', 'UNKNOWN'))
            except Exception as exc:
                log.error('Exception occurred while '
                          'introspecting {0}: {1}'.format(fun, exc))
                return dict(error=dict(name=exc.__class__.__name__,
                                       args=exc.args,
                                       message=str(exc)))
        except Exception as exc:
            log.error('Exception occurred in the runner system: {0}'.format(exc))
            return dict(error=dict(name=exc.__class__.__name__,
                                   args=exc.args,
                                   message=str(exc)))

    def wheel(self, load):
        '''
        Send a master control function back to the wheel system
        '''
        # All wheel ops pass through eauth
        if 'token' in load:
            try:
                token = self.loadauth.get_tok(load['token'])
            except Exception as exc:
                msg = 'Exception occurred when generating auth token: {0}'.format(exc)
                log.error(msg)
                return dict(error=dict(name='TokenAuthenticationError',
                                       message=msg))
            if not token:
                msg = 'Authentication failure of type "token" occurred.'
                log.warning(msg)
                return dict(error=dict(name='TokenAuthenticationError',
                                       message=msg))
            if token['eauth'] not in self.opts['external_auth']:
                msg = 'Authentication failure of type "token" occurred.'
                log.warning(msg)
                return dict(error=dict(name='TokenAuthenticationError',
                                       message=msg))
            good = self.ckminions.wheel_check(
                self.opts['external_auth'][token['eauth']][token['name']]
                if token['name'] in self.opts['external_auth'][token['eauth']]
                else self.opts['external_auth'][token['eauth']]['*'],
                load['fun'])
            if not good:
                msg = ('Authentication failure of type "token" occurred for '
                       'user {0}.').format(token['name'])
                log.warning(msg)
                return dict(error=dict(name='TokenAuthenticationError',
                                       message=msg))

            jid = salt.utils.jid.gen_jid()
            fun = load.pop('fun')
            tag = tagify(jid, prefix='wheel')
            data = {'fun': "wheel.{0}".format(fun),
                    'jid': jid,
                    'tag': tag,
                    'user': token['name']}
            try:
                self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
                ret = self.wheel_.call_func(fun, **load)
                data['return'] = ret
                data['success'] = True
                self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
                return {'tag': tag, 'data': data}
            except Exception as exc:
                log.error('Exception occurred while '
                          'introspecting {0}: {1}'.format(fun, exc))
                data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
                    fun,
                    exc.__class__.__name__,
                    exc,
                )
                data['success'] = False
                self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
                return {'tag': tag, 'data': data}

        if 'eauth' not in load:
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return dict(error=dict(name='EauthAuthenticationError',
                                   message=msg))
        if load['eauth'] not in self.opts['external_auth']:
            # The eauth system is not enabled, fail
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return dict(error=dict(name='EauthAuthenticationError',
                                   message=msg))

        try:
            name = self.loadauth.load_name(load)
            if not ((name in self.opts['external_auth'][load['eauth']]) |
                    ('*' in self.opts['external_auth'][load['eauth']])):
                msg = ('Authentication failure of type "eauth" occurred for '
                       'user {0}.').format(load.get('username', 'UNKNOWN'))
                log.warning(msg)
                return dict(error=dict(name='EauthAuthenticationError',
                                       message=msg))
            if not self.loadauth.time_auth(load):
                msg = ('Authentication failure of type "eauth" occurred for '
                       'user {0}.').format(load.get('username', 'UNKNOWN'))
                log.warning(msg)
                return dict(error=dict(name='EauthAuthenticationError',
                                       message=msg))
            good = self.ckminions.wheel_check(
                self.opts['external_auth'][load['eauth']][name]
                if name in self.opts['external_auth'][load['eauth']]
                else self.opts['external_auth'][load['eauth']]['*'],
                load['fun'])
            if not good:
                msg = ('Authentication failure of type "eauth" occurred for '
                       'user {0}.').format(load.get('username', 'UNKNOWN'))
                log.warning(msg)
                return dict(error=dict(name='EauthAuthenticationError',
                                       message=msg))

            jid = salt.utils.jid.gen_jid()
            fun = load.pop('fun')
            tag = tagify(jid, prefix='wheel')
            data = {'fun': "wheel.{0}".format(fun),
                    'jid': jid,
                    'tag': tag,
                    'user': load.get('username', 'UNKNOWN')}
            try:
                self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
                ret = self.wheel_.call_func(fun, **load)
                data['return'] = ret
                data['success'] = True
                self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
                return {'tag': tag, 'data': data}
            except Exception as exc:
                log.error('Exception occurred while '
                          'introspecting {0}: {1}'.format(fun, exc))
                data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
                    fun,
                    exc.__class__.__name__,
                    exc,
                )
                data['success'] = False
                self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
                return {'tag': tag, 'data': data}
        except Exception as exc:
            log.error('Exception occurred in the wheel system: {0}'.format(exc))
            return dict(error=dict(name=exc.__class__.__name__,
                                   args=exc.args,
                                   message=str(exc)))

    def mk_token(self, load):
        '''
        Create and return an authentication token, the clear load needs to
        contain the eauth key and the needed authentication creds.
        '''
        if 'eauth' not in load:
            log.warning('Authentication failure of type "eauth" occurred.')
            return ''
        if load['eauth'] not in self.opts['external_auth']:
            # The eauth system is not enabled, fail
            log.warning('Authentication failure of type "eauth" occurred.')
            return ''
        try:
            name = self.loadauth.load_name(load)
            if not ((name in self.opts['external_auth'][load['eauth']]) |
                    ('*' in self.opts['external_auth'][load['eauth']])):
                log.warning('Authentication failure of type "eauth" occurred.')
                return ''
            if not self.loadauth.time_auth(load):
                log.warning('Authentication failure of type "eauth" occurred.')
                return ''
            return self.loadauth.mk_token(load)
        except Exception as exc:
            log.error('Exception occurred while authenticating: {0}'.format(exc))
            return ''

    def get_token(self, load):
        '''
        Return the name associated with a token or False if the token is
        invalid
        '''
        if 'token' not in load:
            return False
        return self.loadauth.get_tok(load['token'])

    def publish(self, load):
        '''
        This method sends out publications to the minions, it can only be used
        by the LocalClient.
        '''
        extra = load.get('kwargs', {})

        # check blacklist/whitelist
        good = True
        # Check if the user is blacklisted
        for user_re in self.opts['client_acl_blacklist'].get('users', []):
            if re.match(user_re, load['user']):
                good = False
                break

        # check if the cmd is blacklisted
        for module_re in self.opts['client_acl_blacklist'].get('modules', []):
            # if this is a regular command, its a single function
            if isinstance(load['fun'], str):
                funs_to_check = [load['fun']]
            # if this a compound function
            else:
                funs_to_check = load['fun']
            for fun in funs_to_check:
                if re.match(module_re, fun):
                    good = False
                    break

        if good is False:
            log.error(
                '{user} does not have permissions to run {function}. Please '
                'contact your local administrator if you believe this is in '
                'error.\n'.format(user=load['user'], function=load['fun']))
            return ''
        # to make sure we don't step on anyone else's toes
        del good

        # Check for external auth calls
        if extra.get('token', False):
            # A token was passed, check it
            try:
                token = self.loadauth.get_tok(extra['token'])
            except Exception as exc:
                log.error(
                    'Exception occurred when generating auth token: {0}'.format(exc))
                return ''
            if not token:
                log.warning('Authentication failure of type "token" occurred. '
                            'Token could not be retrieved.')
                return ''
            if token['eauth'] not in self.opts['external_auth']:
                log.warning('Authentication failure of type "token" occurred. '
                            'Token does not verify against eauth provider: '
                            '{0}'.format(self.opts['external_auth']))
                return ''
            if not ((token['name'] in self.opts['external_auth'][token['eauth']]) |
                    ('*' in self.opts['external_auth'][token['eauth']])):
                log.warning('Authentication failure of type "token" occurred.')
                return ''
            good = self.ckminions.auth_check(
                self.opts['external_auth'][token['eauth']][token['name']]
                if token['name'] in self.opts['external_auth'][token['eauth']]
                else self.opts['external_auth'][token['eauth']]['*'],
                load['fun'],
                load['tgt'],
                load.get('tgt_type', 'glob'))
            if not good:
                # Accept find_job so the CLI will function cleanly
                if load['fun'] != 'saltutil.find_job':
                    log.warning('Authentication failure of type "token" occurred.')
                    return ''
            load['user'] = token['name']
            log.debug('Minion tokenized user = "{0}"'.format(load['user']))
        elif 'eauth' in extra:
            if extra['eauth'] not in self.opts['external_auth']:
                # The eauth system is not enabled, fail
                log.warning('Authentication failure of type "eauth" occurred.')
                return ''
            try:
                name = self.loadauth.load_name(extra)
                if not ((name in self.opts['external_auth'][extra['eauth']]) |
                        ('*' in self.opts['external_auth'][extra['eauth']])):
                    log.warning('Authentication failure of type "eauth" occurred.')
                    return ''
                if not self.loadauth.time_auth(extra):
                    log.warning('Authentication failure of type "eauth" occurred.')
                    return ''
            except Exception as exc:
                log.error('Exception occurred while authenticating: {0}'.format(exc))
                return ''
            good = self.ckminions.auth_check(
                self.opts['external_auth'][extra['eauth']][name]
                if name in self.opts['external_auth'][extra['eauth']]
                else self.opts['external_auth'][extra['eauth']]['*'],
                load['fun'],
                load['tgt'],
                load.get('tgt_type', 'glob'))
            if not good:
                # Accept find_job so the CLI will function cleanly
                if load['fun'] != 'saltutil.find_job':
                    log.warning('Authentication failure of type "eauth" occurred.')
                    return ''
            load['user'] = name
        # Verify that the caller has root on master
        elif 'user' in load:
            if load['user'].startswith('sudo_'):
                # If someone can sudo, allow them to act as root
                if load.get('key', 'invalid') == self.key.get('root'):
                    load.pop('key')
                elif load.pop('key') != self.key[self.opts.get('user', 'root')]:
                    log.warning('Authentication failure of type "user" occurred.')
                    return ''
            elif load['user'] == self.opts.get('user', 'root'):
                if load.pop('key') != self.key[self.opts.get('user', 'root')]:
                    log.warning('Authentication failure of type "user" occurred.')
                    return ''
            elif load['user'] == 'root':
                if load.pop('key') != self.key.get(self.opts.get('user', 'root')):
                    log.warning('Authentication failure of type "user" occurred.')
                    return ''
            elif load['user'] == salt.utils.get_user():
                if load.pop('key') != self.key.get(load['user']):
                    log.warning('Authentication failure of type "user" occurred.')
                    return ''
            else:
                if load['user'] in self.key:
                    # User is authorised, check key and check perms
                    if load.pop('key') != self.key[load['user']]:
                        log.warning('Authentication failure of type "user" occurred.')
                        return ''
                    if load['user'] not in self.opts['client_acl']:
                        log.warning('Authentication failure of type "user" occurred.')
                        return ''
                    good = self.ckminions.auth_check(
                        self.opts['client_acl'][load['user']],
                        load['fun'],
                        load['tgt'],
                        load.get('tgt_type', 'glob'))
                    if not good:
                        # Accept find_job so the CLI will function cleanly
                        if load['fun'] != 'saltutil.find_job':
                            log.warning('Authentication failure of type "user" '
                                        'occurred.')
                            return ''
                else:
                    log.warning('Authentication failure of type "user" occurred.')
                    return ''
        else:
            if load.pop('key') != self.key[salt.utils.get_user()]:
                log.warning('Authentication failure of type "other" occurred.')
                return ''
        # Retrieve the minions list
        minions = self.ckminions.check_minions(
            load['tgt'],
            load.get('tgt_type', 'glob'))
        # If we order masters (via a syndic), don't short circuit if no minions
        # are found
        if not self.opts.get('order_masters'):
            # Check for no minions
            if not minions:
                return {
                    'enc': 'clear',
                    'load': {
                        'jid': None,
                        'minions': minions
                    }
                }
        # Retrieve the jid
        if not load['jid']:
            fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
            load['jid'] = self.mminion.returners[fstr](
                nocache=load.get('nocache', False))
        self.event.fire_event({'minions': minions}, load['jid'])

        new_job_load = {
            'jid': load['jid'],
            'tgt_type': load['tgt_type'],
            'tgt': load['tgt'],
            'user': load['user'],
            'fun': load['fun'],
            'arg': load['arg'],
            'minions': minions,
        }

        # Announce the job on the event bus
        self.event.fire_event(new_job_load, 'new_job')  # old dup event
        self.event.fire_event(new_job_load, tagify([load['jid'], 'new'], 'job'))

        # Save the invocation information
        if self.opts['ext_job_cache']:
            try:
                fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
                self.mminion.returners[fstr](load['jid'], load)
            except KeyError:
                log.critical(
                    'The specified returner used for the external job cache '
                    '"{0}" does not have a save_load function!'.format(
                        self.opts['ext_job_cache']))
            except Exception:
                log.critical(
                    'The specified returner threw a stack trace:\n',
                    exc_info=True)

        # always write out to the master job cache
        try:
            fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
            self.mminion.returners[fstr](load['jid'], load)
        except KeyError:
            log.critical(
                'The specified returner used for the master job cache '
                '"{0}" does not have a save_load function!'.format(
                    self.opts['master_job_cache']))
        except Exception:
            log.critical(
                'The specified returner threw a stack trace:\n',
                exc_info=True)

        # Altering the contents of the publish load is serious!! Changes here
        # break compatibility with minion/master versions and even tiny
        # additions can have serious implications on the performance of the
        # publish commands.
        #
        # In short, check with <NAME> before you even think about
        # touching this stuff, we can probably do what you want to do another
        # way that won't have a negative impact.
        pub_load = {
            'fun': load['fun'],
            'arg': load['arg'],
            'tgt': load['tgt'],
            'jid': load['jid'],
            'ret': load['ret'],
        }

        if 'id' in extra:
            pub_load['id'] = extra['id']
        if 'tgt_type' in load:
            pub_load['tgt_type'] = load['tgt_type']
        if 'to' in load:
            pub_load['to'] = load['to']

        if 'kwargs' in load:
            if 'ret_config' in load['kwargs']:
                pub_load['ret_config'] = load['kwargs'].get('ret_config')
            if 'metadata' in load['kwargs']:
                pub_load['metadata'] = load['kwargs'].get('metadata')

        if 'user' in load:
            log.info(
                'User {user} Published command {fun} with jid {jid}'.format(
                    **load))
            pub_load['user'] = load['user']
        else:
            log.info(
                'Published command {fun} with jid {jid}'.format(**load))
        log.debug('Published command details {0}'.format(pub_load))

        return {'ret': {
                    'jid': load['jid'],
                    'minions': minions
                    },
                'pub': pub_load
                }
''' from __future__ import", "not good: # The minion is not who it says", "with salt.utils.fopen(jid_fn, 'r') as fp_: if not load['id'] == fp_.read():", "' '{2}'.format( fun, exc, load['id'] ) ) return ret def", "== fp_.read(): return {} return self.local.get_cache_returns(load['jid']) def minion_pub(self, load): '''", "salt.utils.gzip_util import salt.utils.jid from salt.pillar import git_pillar from salt.utils.event import", "in load['events']: self.event.fire_event(event, event['tag']) # old dup event if load.get('pretag')", "if not ((name in self.opts['external_auth'][extra['eauth']]) | ('*' in self.opts['external_auth'][extra['eauth']])): log.warning(", "'enc': 'clear', 'load': { 'jid': None, 'minions': minions } }", "you even think about # touching this stuff, we can", "token is invalid ''' if 'token' not in load: return", "self.opts['external_auth'][load['eauth']][name] if name in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][load['eauth']]['*'], load['fun']) if not", "is invalid, just ignore it if any(key not in load", "system: {0}'.format(exc) ) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) def mk_token(self, load):", "\"user\" occurred.' ) return '' else: if load['user'] in self.key:", "'data.p') tmpfh, tmpfname = tempfile.mkstemp(dir=cdir) os.close(tmpfh) with salt.utils.fopen(tmpfname, 'w+b') as", "if one is specified ''' if not skip_verify: if 'id'", "'*.p'): cache_file = os.path.join(file_lists_dir, file_lists_cache) try: os.remove(cache_file) except OSError as", "exc: log.critical( 'Unable to clear env cache file {0}: {1}'", "data}) ) # On Windows, os.rename will fail if the", "minion restrictions so that the minion publication will only work", "# if this a compound function else: funs_to_check = load['fun']", "fun in funs_to_check: if re.match(module_re, fun): good = False break", "load['jid']) new_job_load = { 'jid': load['jid'], 'tgt_type': load['tgt_type'], 'tgt': load['tgt'],", "{} envs = self._file_envs() for saltenv in envs: if saltenv", "occurred. \\ Authentication type of {0} not present.').format(token['eauth']) return ''", "out publications to the minions, it can only be used", "it and move on log.error( 'Top function {0} failed with", "the master job cache try: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load)", "= self.serial.load(fp_) if isinstance(mine_data, dict): if mine_data.pop(load['fun'], False): with salt.utils.fopen(datap,", "token) with salt.utils.fopen(token_path) as token_file: token_data = serializer.loads(token_file.read()) if 'expire'", "is authorised, check key and check perms if load.pop('key') !=", "initialted the execution. 
''' if not skip_verify and any(key not", "= serializer.loads(token_file.read()) if 'expire' not in token_data or token_data.get('expire', 0)", "-*- ''' This module contains all of the routines needed", "extra['id'] if 'tgt_type' in load: pub_load['tgt_type'] = load['tgt_type'] if 'to'", "self.local = salt.client.get_local_client(mopts=self.opts) # Create the master minion to access", "raise SaltMasterError('No fileserver backends available') fileserver.update() except Exception as exc:", "False # check group flags if self.opts.get('permissive_pki_access', False) and stat.S_IWGRP", "Verify that the passed information authorized a minion to execute", "to the runner system ''' if 'token' in load: try:", "return data is invalid, just ignore it if any(key not", "this a compound function else: funs_to_check = load['fun'] for fun", "is the list of funcs/modules! if isinstance(self.opts['peer_run'][match], list): perms.update(self.opts['peer_run'][match]) good", "self.event.fire_event({'minions': minions}, load['jid']) new_job_load = { 'jid': load['jid'], 'tgt_type': load['tgt_type'],", "{})) pillar_dirs = {} data = pillar.compile_pillar(pillar_dirs=pillar_dirs) if self.opts.get('minion_data_cache', False):", "windows try: user = self.opts['user'] pwnam = pwd.getpwnam(user) uid =", "pub_load['to'] = load['to'] if 'kwargs' in load: if 'ret_config' in", "being run as root and can therefore not # chown", "''' def __init__(self, opts): self.opts = opts def check_permissions(self, filename):", "return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: fun = load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts)", "requires that a built fileserver object be passed in '''", "in ('fun', 'arg', 'tgt', 'ret', 'id')): return False # If", "ids def init_git_pillar(opts): ''' Clear out the ext pillar caches,", "= salt.minion.MasterMinion( self.opts, states=False, rend=False) self.__setup_fileserver() def __setup_fileserver(self): ''' Set", "load['fun'], 'arg': load['arg'], 'expr_form': load.get('tgt_type', 'glob'), 'tgt': load['tgt'], 'ret': load['ret'],", "log.warn('Autosign keyid expired {0}'.format(stub_file)) os.remove(stub_file) stub_file = os.path.join(autosign_dir, keyid) if", "the local file objects from the file server interface '''", "own mine ''' if 'id' not in load or 'fun'", "= os.path.join(dirpath, token) with salt.utils.fopen(token_path) as token_file: token_data = serializer.loads(token_file.read())", "load, skip_verify=False): ''' Request the return data from a specific", "except ValueError: msg = 'Failed to parse timeout value: {0}'.format(", "= salt.utils.minions.CkMinions(opts) # Make an Auth object self.loadauth = salt.auth.LoadAuth(opts)", "set() for match in self.opts['peer_run']: if re.match(match, load['id']): # This", "not load.get('clear', False): if os.path.isfile(datap): with salt.utils.fopen(datap, 'rb') as fp_:", "[] mopts['file_roots'] = file_roots if load.get('env_only'): return mopts mopts['renderer'] =", "# Check if the user is blacklisted for user_re in", "1024*1024 * self.opts['file_recv_max_size'] if 'loc' in load and load['loc'] <", "'glob') if match_type.lower() == 'pillar': match_type = 'pillar_exact' if match_type.lower()", ") # On Windows, os.rename will fail if the destination", "occurred for ' 'user {0}.').format(token['name']) log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) try:", "cdir = os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): os.makedirs(cdir) 
datap", "'.{0}_key'.format(user) ) if os.path.exists(keyfile): log.debug('Removing stale keyfile: {0}'.format(keyfile)) os.unlink(keyfile) key", "return keys def fileserver_update(fileserver): ''' Update the fileserver backends, requires", "object self.wheel_ = salt.wheel.Wheel(opts) def runner(self, load): ''' Send a", "salt.utils.fopen(signing_file, 'r') as fp_: for line in fp_: line =", "Checks if the specified keyid should automatically be rejected. '''", "else: log.warning( 'Authentication failure of type \"user\" occurred.' ) return", "is not enabled, fail log.warning('Authentication failure of type \"eauth\" occurred.')", "load['user'] else: log.info( 'Published command {fun} with jid {jid}'.format( **load", "if os.path.isfile(cpath) and load['loc'] != 0: mode = 'ab' else:", "'fun': load['fun'], 'arg': load['arg'], 'minions': minions, } # Announce the", "load['tgt_type'] pub_load['raw'] = True ret = {} for minion in", "root and can therefore not # chown the key file", ") else: pillargitfs.append( git_pillar.GitPillar( br, loc, opts ) ) return", "= os.path.normpath(normpath) cpath = os.path.join( self.opts['cachedir'], 'minions', load['id'], 'files', normpath)", "the minion ''' if any(key not in load for key", "= 'Authentication failure of type \"token\" occurred.' log.warning(msg) return dict(error=dict(name='TokenAuthenticationError',", "fun, exc.__class__.__name__, exc, ) data['success'] = False self.event.fire_event(data, tagify([jid, 'ret'],", "and stat.S_IWGRP & fmode.st_mode: return True elif stat.S_IWGRP & fmode.st_mode:", "= os.path.join( self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn =", "if we have a load, save it if 'load' in", "if 'id' not in load: return False if 'events' not", "minion access to the master ''' def __init__(self, opts): self.opts", "if not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, load['jid']) with salt.utils.fopen(jid_fn,", "occurred.' log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) good = self.ckminions.runner_check( self.opts['external_auth'][token['eauth']][token['name']] if", "dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) def wheel(self, load): ''' Send a master", "load.pop('key') != self.key[salt.utils.get_user()]: log.warning( 'Authentication failure of type \"other\" occurred.'", "def _mine_flush(self, load, skip_verify=False): ''' Allow the minion to delete", "else's toes del good # Check for external auth calls", "elif load.pop('key') != self.key[self.opts.get('user', 'root')]: log.warning( 'Authentication failure of type", "'user {0}.').format(token['name']) log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) jid = salt.utils.jid.gen_jid() fun", "this: peer: .*: - .* This configuration will enable all", "'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: name =", "self.opts['master_job_cache'] ) ) except Exception: log.critical( 'The specified returner threw", "'Exception occurred when generating auth token: {0}'.format( exc ) )", "mminion = salt.minion.MasterMinion( opts, states=False, rend=False, ) # If the", "the list of funcs/modules! 
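# ---------------------------------------------------------------------------
# Minimal illustrative sketch: the helpers above dispatch into whichever
# job-cache returner is configured by building a '<returner>.<function>' key
# and only calling it when the returner actually provides that hook.
# ``run_returner_hook`` and the demo dict in the doctest are hypothetical
# names used only to show the lookup pattern in isolation.
#
#   >>> hooks = {'local_cache.clean_old_jobs': lambda: 'cleaned'}
#   >>> run_returner_hook(hooks, 'local_cache', 'clean_old_jobs')
#   'cleaned'
#   >>> run_returner_hook(hooks, 'local_cache', 'save_load') is None
#   True
def run_returner_hook(returners, master_job_cache, hook):
    # Build the dotted lookup key, e.g. 'local_cache.clean_old_jobs'
    fstr = '{0}.{1}'.format(master_job_cache, hook)
    # Skip silently when the configured returner does not implement the hook
    if fstr in returners:
        return returners[fstr]()
    return None
# ---------------------------------------------------------------------------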
class AutoKey(object):
    '''
    Implement the methods to run auto key acceptance and rejection
    '''
    def __init__(self, opts):
        self.opts = opts

    def check_permissions(self, filename):
        '''
        Check if the specified filename has correct permissions
        '''
        if salt.utils.is_windows():
            return True

        # After we've ascertained we're not on windows
        try:
            user = self.opts['user']
            pwnam = pwd.getpwnam(user)
            uid = pwnam[2]
            gid = pwnam[3]
            groups = salt.utils.get_gid_list(user, include_default=False)
        except KeyError:
            log.error(
                'Failed to determine groups for user {0}. The user is not '
                'available.\n'.format(
                    user
                )
            )
            return False

        fmode = os.stat(filename)

        if os.getuid() == 0:
            if fmode.st_uid == uid or fmode.st_gid != gid:
                return True
            elif self.opts.get('permissive_pki_access', False) \
                    and fmode.st_gid in groups:
                return True
        else:
            if stat.S_IWOTH & fmode.st_mode:
                # don't allow others to write to the file
                return False

            # check group flags
            if self.opts.get('permissive_pki_access', False) and stat.S_IWGRP & fmode.st_mode:
                return True
            elif stat.S_IWGRP & fmode.st_mode:
                return False

            # check if writable by group or other
            if not (stat.S_IWGRP & fmode.st_mode or
                    stat.S_IWOTH & fmode.st_mode):
                return True

        return False

    def check_signing_file(self, keyid, signing_file):
        '''
        Check a keyid for membership in a signing file
        '''
        if not signing_file or not os.path.exists(signing_file):
            return False

        if not self.check_permissions(signing_file):
            message = 'Wrong permissions for {0}, ignoring content'
            log.warn(message.format(signing_file))
            return False

        with salt.utils.fopen(signing_file, 'r') as fp_:
            for line in fp_:
                line = line.strip()
                if line.startswith('#'):
                    continue
                else:
                    if salt.utils.expr_match(keyid, line):
                        return True
        return False

    def check_autosign_dir(self, keyid):
        '''
        Check a keyid for membership in an autosign directory.
        '''
        autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign')

        # cleanup expired files
        expire_minutes = self.opts.get('autosign_expire_minutes', 10)
        if expire_minutes > 0:
            min_time = time.time() - (60 * int(expire_minutes))
            for root, dirs, filenames in os.walk(autosign_dir):
                for f in filenames:
                    stub_file = os.path.join(autosign_dir, f)
                    mtime = os.path.getmtime(stub_file)
                    if mtime < min_time:
                        log.warn('Autosign keyid expired {0}'.format(stub_file))
                        os.remove(stub_file)

        stub_file = os.path.join(autosign_dir, keyid)
        if not os.path.exists(stub_file):
            return False
        os.remove(stub_file)
        return True

    def check_autoreject(self, keyid):
        '''
        Checks if the specified keyid should automatically be rejected.
        '''
        return self.check_signing_file(
            keyid,
            self.opts.get('autoreject_file', None)
        )

    def check_autosign(self, keyid):
        '''
        Checks if the specified keyid should automatically be signed.
        '''
        if self.opts['auto_accept']:
            return True
        if self.check_signing_file(keyid, self.opts.get('autosign_file', None)):
            return True
        if self.check_autosign_dir(keyid):
            return True
        return False


class RemoteFuncs(object):
    '''
    Functions made available to minions, this class includes the raw routines
    post validation that make up the minion access to the master
    '''
    def __init__(self, opts):
        self.opts = opts
        self.event = salt.utils.event.get_event(
                'master',
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)
        self.serial = salt.payload.Serial(opts)
        self.ckminions = salt.utils.minions.CkMinions(opts)
        # Create the tops dict for loading external top data
        self.tops = salt.loader.tops(self.opts)
        # Make a client
        self.local = salt.client.get_local_client(mopts=self.opts)
        # Create the master minion to access the external job cache
        self.mminion = salt.minion.MasterMinion(
                self.opts,
                states=False,
                rend=False)
        self.__setup_fileserver()

    def __setup_fileserver(self):
        '''
        Set the local file objects from the file server interface
        '''
        fs_ = salt.fileserver.Fileserver(self.opts)
        self._serve_file = fs_.serve_file
        self._file_hash = fs_.file_hash
        self._file_list = fs_.file_list
        self._file_list_emptydirs = fs_.file_list_emptydirs
        self._dir_list = fs_.dir_list
        self._symlink_list = fs_.symlink_list
        self._file_envs = fs_.envs

    def _ext_nodes(self, load, skip_verify=False):
        '''
        Return the results from an external node classifier if one is
        specified
        '''
        if not skip_verify:
            if 'id' not in load:
                log.error('Received call for external nodes without an id')
                return {}
            if not salt.utils.verify.valid_id(self.opts, load['id']):
                return {}
        # Evaluate all configured master_tops interfaces
        opts = {}
        grains = {}
        ret = {}

        if 'opts' in load:
            opts = load['opts']
            grains = load['opts']['grains']
        for fun in self.tops:
            if fun not in self.opts.get('master_tops', {}):
                continue
            try:
                ret.update(self.tops[fun](opts=opts, grains=grains))
            except Exception as exc:
                # If anything happens in the top generation, log it and move on
                log.error(
                    'Top function {0} failed with error {1} for minion '
                    '{2}'.format(
                        fun, exc, load['id']
                    )
                )
        return ret

    def _mine_get(self, load, skip_verify=False):
        '''
        Gathers the data from the specified minions' mine
        '''
        if not skip_verify:
            if any(key not in load for key in ('id', 'tgt', 'fun')):
                return {}
        if 'mine_get' in self.opts:
            # If master side acl defined.
            if not isinstance(self.opts['mine_get'], dict):
                return {}
            perms = set()
            for match in self.opts['mine_get']:
                if re.match(match, load['id']):
                    if isinstance(self.opts['mine_get'][match], list):
                        perms.update(self.opts['mine_get'][match])
            if not any(re.match(perm, load['fun']) for perm in perms):
                return {}
        ret = {}
        if not salt.utils.verify.valid_id(self.opts, load['id']):
            return ret
        match_type = load.get('expr_form', 'glob')
        if match_type.lower() == 'pillar':
            match_type = 'pillar_exact'
        if match_type.lower() == 'compound':
            match_type = 'compound_pillar_exact'
        checker = salt.utils.minions.CkMinions(self.opts)
        minions = checker.check_minions(
                load['tgt'],
                match_type,
                greedy=False
                )
        for minion in minions:
            mine = os.path.join(
                    self.opts['cachedir'],
                    'minions',
                    minion,
                    'mine.p')
            try:
                with salt.utils.fopen(mine, 'rb') as fp_:
                    fdata = self.serial.load(fp_).get(load['fun'])
                    if fdata:
                        ret[minion] = fdata
            except Exception:
                continue
        return ret

    def _mine(self, load, skip_verify=False):
        '''
        Return the mine data
        '''
        if not skip_verify:
            if 'id' not in load or 'data' not in load:
                return False
        if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
            cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
            if not os.path.isdir(cdir):
                os.makedirs(cdir)
            datap = os.path.join(cdir, 'mine.p')
            if not load.get('clear', False):
                if os.path.isfile(datap):
                    with salt.utils.fopen(datap, 'rb') as fp_:
                        new = self.serial.load(fp_)
                    if isinstance(new, dict):
                        new.update(load['data'])
                        load['data'] = new
            with salt.utils.fopen(datap, 'w+b') as fp_:
                fp_.write(self.serial.dumps(load['data']))
        return True

    def _mine_delete(self, load):
        '''
        Allow the minion to delete a specific function from its own mine
        '''
        if 'id' not in load or 'fun' not in load:
            return False
        if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
            cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
            if not os.path.isdir(cdir):
                return False
            datap = os.path.join(cdir, 'mine.p')
            if os.path.isfile(datap):
                try:
                    with salt.utils.fopen(datap, 'rb') as fp_:
                        mine_data = self.serial.load(fp_)
                    if isinstance(mine_data, dict):
                        if mine_data.pop(load['fun'], False):
                            with salt.utils.fopen(datap, 'w+b') as fp_:
                                fp_.write(self.serial.dumps(mine_data))
                except OSError:
                    return False
        return True

    def _mine_flush(self, load, skip_verify=False):
        '''
        Allow the minion to delete all of its own mine contents
        '''
        if not skip_verify and 'id' not in load:
            return False
        if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
            cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
            if not os.path.isdir(cdir):
                return False
            datap = os.path.join(cdir, 'mine.p')
            if os.path.isfile(datap):
                try:
                    os.remove(datap)
                except OSError:
                    return False
        return True

    def _minion_event(self, load):
        '''
        Receive an event from the minion and fire it on the master event
        interface
        '''
        if 'id' not in load:
            return False
        if 'events' not in load and ('tag' not in load or 'data' not in load):
            return False
        if 'events' in load:
            for event in load['events']:
                self.event.fire_event(event, event['tag'])  # old dup event
                if load.get('pretag') is not None:
                    if 'data' in event:
                        self.event.fire_event(event['data'], tagify(event['tag'], base=load['pretag']))
                    else:
                        self.event.fire_event(event, tagify(event['tag'], base=load['pretag']))
        else:
            tag = load['tag']
            self.event.fire_event(load, tag)
        return True

    def pub_ret(self, load, skip_verify=False):
        '''
        Request the return data from a specific jid, only allowed
        if the requesting minion also initiated the execution.
        '''
        if not skip_verify and any(key not in load for key in ('jid', 'id')):
            return {}
        else:
            auth_cache = os.path.join(
                    self.opts['cachedir'],
                    'publish_auth')
            if not os.path.isdir(auth_cache):
                os.makedirs(auth_cache)
            jid_fn = os.path.join(auth_cache, load['jid'])
            with salt.utils.fopen(jid_fn, 'r') as fp_:
                if not load['id'] == fp_.read():
                    return {}
            return self.local.get_cache_returns(load['jid'])

    def revoke_auth(self, load):
        '''
        Allow a minion to request revocation of its own key
        '''
        if 'id' not in load:
            return False
        keyapi = salt.key.Key(self.opts)
        keyapi.delete_key(load['id'],
                          preserve_minions=load.get('preserve_minion_cache', False))
        return True
class LocalFuncs(object):
    '''
    Set up methods for use only from the local system
    '''
    # The ClearFuncs object encapsulates the functions that can be executed in
    # the clear:
    # publish (The publish from the LocalClient)
    # _auth
    def __init__(self, opts, key):
        self.opts = opts
        self.serial = salt.payload.Serial(opts)
        self.key = key
        # Create the event manager
        self.event = salt.utils.event.get_event(
                'master',
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)
        # Make a client
        self.local = salt.client.get_local_client(mopts=self.opts)
        # Make a minion checker object
        self.ckminions = salt.utils.minions.CkMinions(opts)
        # Make an Auth object
        self.loadauth = salt.auth.LoadAuth(opts)
        # Stand up the master Minion to access returner data
        self.mminion = salt.minion.MasterMinion(
                self.opts,
                states=False,
                rend=False)
        # Make a wheel object
        self.wheel_ = salt.wheel.Wheel(opts)

    def mk_token(self, load):
        '''
        Create and return an authentication token, the clear load needs to
        contain the eauth key and the needed authentication creds.
        '''
        if 'eauth' not in load:
            log.warning('Authentication failure of type "eauth" occurred.')
            return ''
        if load['eauth'] not in self.opts['external_auth']:
            # The eauth system is not enabled, fail
            log.warning('Authentication failure of type "eauth" occurred.')
            return ''
        try:
            name = self.loadauth.load_name(load)
            if not ((name in self.opts['external_auth'][load['eauth']]) |
                    ('*' in self.opts['external_auth'][load['eauth']])):
                log.warning('Authentication failure of type "eauth" occurred.')
                return ''
            if not self.loadauth.time_auth(load):
                log.warning('Authentication failure of type "eauth" occurred.')
                return ''
            return self.loadauth.mk_token(load)
        except Exception as exc:
            log.error(
                'Exception occurred while authenticating: {0}'.format(exc)
            )
            return ''

    def get_token(self, load):
        '''
        Return the name associated with a token or False if the token is
        invalid
        '''
        if 'token' not in load:
            return False
        return self.loadauth.get_tok(load['token'])
''' if self.opts['auto_accept']: return True", "not in load for key in ('id', 'path', 'loc')): return", "in self.opts['external_auth'][token['eauth']]) | ('*' in self.opts['external_auth'][token['eauth']])): log.warning('Authentication failure of type", "so the CLI will function cleanly if load['fun'] != 'saltutil.find_job':", "the test module ''' if not self.__verify_minion_publish(load): return {} #", "'events' in load: for event in load['events']: self.event.fire_event(event, event['tag']) #", "'tgt', 'fun')): return {} if 'mine_get' in self.opts: # If", "if os.path.isfile(datap): try: with salt.utils.fopen(datap, 'rb') as fp_: mine_data =", "not self.loadauth.time_auth(load): msg = ('Authentication failure of type \"eauth\" occurred", "self.opts = opts self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts,", "with salt.utils.fopen(tmpfname, 'w+b') as fp_: fp_.write( self.serial.dumps( {'grains': load['grains'], 'pillar':", "as exc: log.error( 'Exception occurred in the runner system: {0}'.format(exc)", "type \"eauth\" occurred.' ) return '' except Exception as exc:", "occurred.') return '' try: name = self.loadauth.load_name(load) if not ((name", "try: if not fileserver.servers: log.error( 'No fileservers loaded, the master", "'jid': jid, 'tag': tag, 'user': token['name']} try: self.event.fire_event(data, tagify([jid, 'new'],", "an Auth object self.loadauth = salt.auth.LoadAuth(opts) # Stand up the", "The minion is not who it says it is! #", "import salt.utils.gzip_util import salt.utils.jid from salt.pillar import git_pillar from salt.utils.event", "specified returner threw a stack trace:\\n', exc_info=True ) # always", "'id')): return {} perms = set() for match in self.opts['peer_run']:", "''' Implement the methods to run auto key acceptance and", "if not os.path.exists(auth_cache): return else: for (dirpath, dirnames, filenames) in", "load): ''' Return the pillar data for the minion '''", "save it if 'load' in load: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'],", "only work if it is enabled in the config. The", "token['name'] log.debug('Minion tokenized user = \"{0}\"'.format(load['user'])) elif 'eauth' in extra:", "if line.startswith('#'): continue else: if salt.utils.expr_match(keyid, line): return True return", "specified filename has correct permissions ''' if salt.utils.is_windows(): return True", "os.walk(autosign_dir): for f in filenames: stub_file = os.path.join(autosign_dir, f) mtime", "listeners and the workers needed by the master. 
''' from", "on log.error( 'Top function {0} failed with error {1} for", "f in filenames: stub_file = os.path.join(autosign_dir, f) mtime = os.path.getmtime(stub_file)", "if isinstance(self.opts['peer'][match], list): perms.extend(self.opts['peer'][match]) if ',' in load['fun']: # 'arg':", "token_path = os.path.join(dirpath, token) with salt.utils.fopen(token_path) as token_file: token_data =", "'fun' not in load: return False if self.opts.get('minion_data_cache', False) or", "method sends out publications to the minions, it can only", "'rb') as fp_: fdata = self.serial.load(fp_).get(load['fun']) if fdata: ret[minion] =", "def _return(self, load): ''' Handle the return data sent from", "in [x for x in opts.get('ext_pillar', [])]: if 'git' in", "the minion and fire it on the master event interface", "if this is a regular command, its a single function", "elif 'eauth' in extra: if extra['eauth'] not in self.opts['external_auth']: #", "salt.utils.minions import salt.utils.gzip_util import salt.utils.jid from salt.pillar import git_pillar from", "available on windows HAS_PWD = False log = logging.getLogger(__name__) #", "min_time = time.time() - (60 * int(expire_minutes)) for root, dirs,", "match_type = 'pillar_exact' if match_type.lower() == 'compound': match_type = 'compound_pillar_exact'", "'minions', load['id']) if not os.path.isdir(cdir): os.makedirs(cdir) datap = os.path.join(cdir, 'mine.p')", "even tiny # additions can have serious implications on the", "key in ('jid', 'id')): return {} else: auth_cache = os.path.join(", "- .* This configuration will enable all minions to execute", "load: log.error('Received call for external nodes without an id') return", "not os.path.exists(stub_file): return False os.remove(stub_file) return True def check_autoreject(self, keyid):", "= minion['return'] if 'jid' in minion: ret['__jid__'] = minion['jid'] for", "try: user = self.opts['user'] pwnam = pwd.getpwnam(user) uid = pwnam[2]", "self.opts.get('autoreject_file', None) ) def check_autosign(self, keyid): ''' Checks if the", "# Verify the load if any(key not in load for", "' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: fun", "'r') as fp_: for line in fp_: line = line.strip()", "\"token\" occurred. \\ Token could not be retrieved.') return ''", "salt.crypt.Crypticle.generate_key_string() cumask = os.umask(191) with salt.utils.fopen(keyfile, 'w+') as fp_: fp_.write(key)", "perms = set() for match in self.opts['mine_get']: if re.match(match, load['id']):", "a recursive publish don't run if re.match('publish.*', load['fun']): return False", "occurred.' 
) return '' good = self.ckminions.auth_check( self.opts['client_acl'][load['user']], load['fun'], load['tgt'],", "salt.utils.verify.valid_id(self.opts, load['id']): return {} # Evaluate all configured master_tops interfaces", "os.remove(stub_file) stub_file = os.path.join(autosign_dir, keyid) if not os.path.exists(stub_file): return False", "is in ' 'error.\\n'.format( user=load['user'], function=load['fun'] ) ) return ''", "return '' else: if load.pop('key') != self.key[salt.utils.get_user()]: log.warning( 'Authentication failure", "while authenticating: {0}'.format(exc) ) return '' def get_token(self, load): '''", "filenames in os.walk(autosign_dir): for f in filenames: stub_file = os.path.join(autosign_dir,", "= self.loadauth.load_name(load) if not ((name in self.opts['external_auth'][load['eauth']]) | ('*' in", "an authentication token, the clear load needs to contain the", "self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False) # Make a client self.local =", "occurred.' ) return '' if load['user'] not in self.opts['client_acl']: log.warning(", "Exception as exc: log.error( 'Exception occurred in the wheel system:", "file return False # check group flags if self.opts.get('permissive_pki_access', False)", "'root')]: log.warning( 'Authentication failure of type \"user\" occurred.' ) return", "Return the pillar data for the minion ''' if any(key", "check_autosign(self, keyid): ''' Checks if the specified keyid should automatically", "minions = checker.check_minions( load['tgt'], match_type, greedy=False ) for minion in", "self.__verify_minion_publish(load): return {} # Set up the publication payload pub_load", "job cache self.mminion = salt.minion.MasterMinion( self.opts, states=False, rend=False) self.__setup_fileserver() def", "don't step on anyone else's toes del good # Check", "with jid {jid}'.format( **load ) ) pub_load['user'] = load['user'] else:", "{} else: auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache):", "not in self.opts['external_auth']: log.warning('Authentication failure of type \"token\" occurred. \\", "skip_verify and 'id' not in load: return False if self.opts.get('minion_data_cache',", "\\ Authentication type of {0} not present.').format(token['eauth']) return '' if", "self.event.fire_event(event, tagify(event['tag'], base=load['pretag'])) else: tag = load['tag'] self.event.fire_event(load, tag) return", "return False if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False): cdir =", "if the specified keyid should automatically be rejected. 
''' return", "self.mminion = salt.minion.MasterMinion( self.opts, states=False, rend=False) self.__setup_fileserver() def __setup_fileserver(self): '''", "believe this is in ' 'error.\\n'.format( user=load['user'], function=load['fun'] ) )", "keys def fileserver_update(fileserver): ''' Update the fileserver backends, requires that", "{0}: {1}: {2}'.format( fun, exc.__class__.__name__, exc, ) self.event.fire_event(data, tagify([jid, 'ret'],", "with salt.utils.fopen(jid_fn, 'w+') as fp_: fp_.write(load['id']) return ret def minion_publish(self,", "not self.check_permissions(signing_file): message = 'Wrong permissions for {0}, ignoring content'", "''' return self.check_signing_file( keyid, self.opts.get('autoreject_file', None) ) def check_autosign(self, keyid):", "load: try: token = self.loadauth.get_tok(load['token']) except Exception as exc: msg", "return ret def _mine_get(self, load, skip_verify=False): ''' Gathers the data", "message=msg)) jid = salt.utils.jid.gen_jid() fun = load.pop('fun') tag = tagify(jid,", "failure of type \"token\" occurred. \\ Authentication type of {0}", "} } # Retrieve the jid if not load['jid']: fstr", "type \"token\" occurred.' log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if token['eauth'] not", "only allow the minion foo.example.com to execute commands from the", "not # chown the key file pass keys[user] = key", "\"token\" occurred for ' 'user {0}.').format(token['name']) log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg))", "check blacklist/whitelist good = True # Check if the user", "salt.pillar.Pillar( self.opts, load['grains'], load['id'], load.get('saltenv', load.get('env')), load.get('ext'), self.mminion.functions, pillar=load.get('pillar_override', {}))", "in pwd.getpwall(): users.append(user.pw_name) for user in acl_users: log.info( 'Preparing the", "if not isinstance(self.opts['peer_run'], dict): return {} if any(key not in", "the methods to run auto key acceptance and rejection '''", "auth calls if extra.get('token', False): # A token was passed,", "key and check perms if load.pop('key') != self.key[load['user']]: log.warning( 'Authentication", "br, loc, opts ) ) return pillargitfs def clean_fsbackend(opts): '''", "the cmd is blacklisted for module_re in self.opts['client_acl_blacklist'].get('modules', []): #", "None, 'minions': minions } } # Retrieve the jid if", "600 octal: Read and write access to the owner only.", "allow others to write to the file return False #", "eauth if 'token' in load: try: token = self.loadauth.get_tok(load['token']) except", "load): return False if 'events' in load: for event in", "if not salt.utils.verify.valid_id(self.opts, load['id']): return False file_recv_max_size = 1024*1024 *", "fileserver.servers: log.error( 'No fileservers loaded, the master will not be", "a keyid for membership in a autosign directory. 
''' autosign_dir", "in self.opts: return False if not isinstance(self.opts['peer'], dict): return False", "{jid}'.format(**load)) self.event.fire_event(load, load['jid']) # old dup event self.event.fire_event(load, tagify([load['jid'], 'ret',", "message=msg)) try: name = self.loadauth.load_name(load) if not (name in self.opts['external_auth'][load['eauth']])", "return dict(error=dict(name='EauthAuthenticationError', message=msg)) good = self.ckminions.wheel_check( self.opts['external_auth'][load['eauth']][name] if name in", "!= 'saltutil.find_job': log.warning( 'Authentication failure of type \"user\" ' 'occurred.'", "# exists, it needs to be written to again. Windows", "in load: return False return self.loadauth.get_tok(load['token']) def publish(self, load): '''", "(via a syndic), don't short circuit if no minions #", "sure double backslashes are normalized normpath = normpath.replace('\\\\', '/') normpath", "load['events']: self.event.fire_event(event, event['tag']) # old dup event if load.get('pretag') is", "= False self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data':", "in load: log.warning('Authentication failure of type \"eauth\" occurred.') return ''", "fp_: fp_.write(self.serial.dumps(load['data'])) return True def _mine_delete(self, load): ''' Allow the", "'tag': tag, 'user': token['name']} try: self.event.fire_event(data, tagify([jid, 'new'], 'wheel')) ret", "return False if len(load['data']) + load.get('loc', 0) > file_recv_max_size: log.error(", "for key in ('jid', 'id')): return {} else: auth_cache =", "{0}: {1}'.format(fun, exc)) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) except Exception as", "good: msg = ('Authentication failure of type \"token\" occurred for", "request revocation of its own key ''' if 'id' not", "import salt.utils.atomicfile import salt.utils.event import salt.utils.verify import salt.utils.minions import salt.utils.gzip_util", "run as root. ''' users = [] keys = {}", "load['opts']['grains'] for fun in self.tops: if fun not in self.opts.get('master_tops',", "anyone else's toes del good # Check for external auth", "try: pub_load['timeout'] = int(load['timeout']) except ValueError: msg = 'Failed to", "another # way that won't have a negative impact. pub_load", "the minions list minions = self.ckminions.check_minions( load['tgt'], load.get('tgt_type', 'glob') )", "{0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) jid = salt.utils.jid.gen_jid() fun", "[]): if re.match(user_re, load['user']): good = False break # check", "return False # Normalize Windows paths normpath = load['path'] if", "delete pub auth file') def clean_old_jobs(opts): ''' Clean out the", "failure of type \"user\" occurred.' ) return '' good =", "msg = 'Exception occurred when generating auth token: {0}'.format( exc)", "self.loadauth.load_name(extra) if not ((name in self.opts['external_auth'][extra['eauth']]) | ('*' in self.opts['external_auth'][extra['eauth']])):", "line = line.strip() if line.startswith('#'): continue else: if salt.utils.expr_match(keyid, line):", "salt.runner.RunnerClient(self.opts) return runner_client.async( fun, load.get('kwarg', {}), token['name']) except Exception as", "{1}'.format(fun, exc)) data['return'] = 'Exception occurred in wheel {0}: {1}:", "pass through eauth if 'token' in load: try: token =", "the publish load is serious!! Changes here # break compatibility", "\"eauth\" occurred.' 
) return '' load['user'] = name # Verify", "KeyError: log.error( 'Failed to determine groups for user {0}. The", "The config will look like this: peer: .*: - .*", "True def check_autoreject(self, keyid): ''' Checks if the specified keyid", "= data else: ret[minion['id']] = minion['return'] if 'jid' in minion:", "auth_file in filenames: auth_file_path = os.path.join(dirpath, auth_file) if not os.path.isfile(auth_file_path):", "dict): return False if any(key not in load for key", "('tag' not in load or 'data' not in load): return", "'hg', 'svn'): if backend in opts['fileserver_backend']: env_cache = os.path.join( opts['cachedir'],", "load['tmo']) log.warn(msg) return {} if 'timeout' in load: try: pub_load['timeout']", "= opts self.serial = salt.payload.Serial(opts) self.key = key # Create", "jid, 'tag': tag, 'user': load.get('username', 'UNKNOWN')} try: self.event.fire_event(data, tagify([jid, 'new'],", "try: name = self.loadauth.load_name(extra) if not ((name in self.opts['external_auth'][extra['eauth']]) |", "file ''' if not signing_file or not os.path.exists(signing_file): return False", "import salt.utils import salt.client import salt.payload import salt.pillar import salt.state", "parts[0] loc = parts[1] except IndexError: log.critical( 'Unable to extract", "run {function}. Please ' 'contact your local administrator if you", "if re.match(module_re, fun): good = False break if good is", "'{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[fstr](nocache=extra.get('nocache', False)) self.event.fire_event({'minions': minions}, load['jid']) new_job_load =", "stub_file = os.path.join(autosign_dir, f) mtime = os.path.getmtime(stub_file) if mtime <", "or 'data' not in load): return False if 'events' in", "load['tgt'] in self.opts['nodegroups']: pub_load['tgt'] = self.opts['nodegroups'][load['tgt']] pub_load['expr_form_type'] = 'compound' pub_load['expr_form']", "''' Receive an event from the minion and fire it", "self.mminion.returners): self.mminion.returners[fstr](load['jid'], endtime) fstr = '{0}.returner'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load) def _syndic_return(self, load):", "'minions', minion, 'mine.p') try: with salt.utils.fopen(mine, 'rb') as fp_: fdata", "''' if 'token' in load: try: token = self.loadauth.get_tok(load['token']) except", "mopts def _ext_nodes(self, load, skip_verify=False): ''' Return the results from", "grains = {} ret = {} if 'opts' in load:", "and fstr in self.mminion.returners): self.mminion.returners[fstr](load['jid'], endtime) fstr = '{0}.returner'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load)", "return '' def get_token(self, load): ''' Return the name associated", "os.path.isfile(env_cache): log.debug('Clearing {0}fs env cache'.format(backend)) try: os.remove(env_cache) except OSError as", "not require creating the masterminion every time? 
mminion = salt.minion.MasterMinion(", "self.opts['nodegroups'][load['tgt']] pub_load['expr_form_type'] = 'compound' else: return {} else: pub_load['expr_form'] =", "ret.pop('__jid__') return ret def revoke_auth(self, load): ''' Allow a minion", "acl_users.add(opts['user']) acl_users.add(salt.utils.get_user()) if HAS_PWD: for user in pwd.getpwall(): users.append(user.pw_name) for", "= 'Exception occurred when generating auth token: {0}'.format( exc) log.error(msg)", "= {'fun': \"wheel.{0}\".format(fun), 'jid': jid, 'tag': tag, 'user': load.get('username', 'UNKNOWN')}", "return '' if load['user'] not in self.opts['client_acl']: log.warning( 'Authentication failure", "Checks if the specified keyid should automatically be signed. '''", "match_type = load.get('expr_form', 'glob') if match_type.lower() == 'pillar': match_type =", "event['tag']) # old dup event if load.get('pretag') is not None:", "one is specified ''' if not skip_verify: if 'id' not", "except Exception as exc: # If anything happens in the", "if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun'], load['tgt'], load.get('tgt_type', 'glob'))", "{0}.').format(token['name']) log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) jid = salt.utils.jid.gen_jid() fun =", "the invocation information if self.opts['ext_job_cache']: try: fstr = '{0}.save_load'.format(self.opts['ext_job_cache']) self.mminion.returners[fstr](load['jid'],", "stub_file = os.path.join(autosign_dir, keyid) if not os.path.exists(stub_file): return False os.remove(stub_file)", "the specified keyid should automatically be rejected. ''' return self.check_signing_file(", "# TODO: better way to not require creating the masterminion", "as exc: log.error( 'Exception {0} occurred in file server update'.format(exc),", "enabled, fail msg = ('Authentication failure of type \"eauth\" occurred", "returner used for the master job cache ' '\"{0}\" does", ") ) return False # Normalize Windows paths normpath =", "# On Windows, os.rename will fail if the destination file", "else: log.info( 'Published command {fun} with jid {jid}'.format( **load )", "True # After we've ascertained we're not on windows try:", "'' try: name = self.loadauth.load_name(extra) if not ((name in self.opts['external_auth'][extra['eauth']])", "requesting minion also initialted the execution. ''' if not skip_verify", "filenames) in os.walk(auth_cache): for auth_file in filenames: auth_file_path = os.path.join(dirpath,", "the return data is invalid, just ignore it if any(key", "message=msg)) try: name = self.loadauth.load_name(load) if not ((name in self.opts['external_auth'][load['eauth']])", "valid if 'peer' not in self.opts: return False if not", "Format individual return loads for key, item in six.iteritems(load['return']): ret", "in self.opts['external_auth'][load['eauth']]): msg = ('Authentication failure of type \"eauth\" occurred", "or 'data' not in load: return False if self.opts.get('minion_data_cache', False)", "('*' in self.opts['external_auth'][token['eauth']])): log.warning('Authentication failure of type \"token\" occurred. 
\\", "minion: ret['__jid__'] = minion['jid'] for key, val in six.iteritems(self.local.get_cache_returns(ret['__jid__'])): if", "self.loadauth.load_name(load) if not (name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']]):", "This method sends out publications to the minions, it can", "log.debug('Minion tokenized user = \"{0}\"'.format(load['user'])) elif 'eauth' in extra: if", ") return {} # Prepare the runner object opts =", "= load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.async( fun, load.get('kwarg', {}),", "if not token: msg = 'Authentication failure of type \"token\"", "file pass keys[user] = key return keys def fileserver_update(fileserver): '''", "groups: return True else: if stat.S_IWOTH & fmode.st_mode: # don't", "information authorized a minion to execute ''' # Verify that", "fp_.write( self.serial.dumps( {'grains': load['grains'], 'pillar': data}) ) # On Windows,", "wheel object self.wheel_ = salt.wheel.Wheel(opts) def runner(self, load): ''' Send", "'{0}.clean_old_jobs'.format(opts['master_job_cache']) if fstr in mminion.returners: mminion.returners[fstr]() def access_keys(opts): ''' A", "!= 'saltutil.find_job': log.warning( 'Authentication failure of type \"eauth\" occurred.' )", "look like returns from individual minions. ''' # Verify the", "= os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): return False datap", "exc_info=True ) # Altering the contents of the publish load", "Altering the contents of the publish load is serious!! Changes", "'{0}.save_load'.format(self.opts['ext_job_cache']) self.mminion.returners[fstr](load['jid'], load) except KeyError: log.critical( 'The specified returner used", "minion, return the runner's function data ''' if 'peer_run' not", "val in six.iteritems(self.local.get_cache_returns(ret['__jid__'])): if key not in ret: ret[key] =", "HAS_PWD = True except ImportError: # pwd is not available", "False): # A token was passed, check it try: token", "salt.minion.MasterMinion( self.opts, states=False, rend=False) self.__setup_fileserver() def __setup_fileserver(self): ''' Set the", "os.path.join(auth_cache, str(ret['jid'])) with salt.utils.fopen(jid_fn, 'w+') as fp_: fp_.write(load['id']) return ret", "loads for key, item in six.iteritems(load['return']): ret = {'jid': load['jid'],", "to file_lists cache file {0}: {1}' .format(cache_file, exc) ) def", "fstr = '{0}.update_endtime'.format(self.opts['master_job_cache']) if (self.opts.get('job_cache_store_endtime') and fstr in self.mminion.returners): self.mminion.returners[fstr](load['jid'],", "load['opts']: grains = load['opts']['grains'] for fun in self.tops: if fun", "enabled in the config. The configuration on the master allows", "contain the eauth key and the needed authentication creds. '''", "'pillar_exact' if match_type.lower() == 'compound': match_type = 'compound_pillar_exact' checker =", "'data': data} except Exception as exc: log.error('Exception occurred while '", "pillar caches, used when the master starts ''' pillargitfs =", "have a negative impact. 
pub_load = { 'fun': load['fun'], 'arg':", "expired tokens from the master ''' serializer = salt.payload.Serial(opts) for", "os.remove(stub_file) return True def check_autoreject(self, keyid): ''' Checks if the", "10) if expire_minutes > 0: min_time = time.time() - (60", "= self.local.cmd_async(**pub_load) ret['minions'] = self.ckminions.check_minions( load['tgt'], pub_load['expr_form']) auth_cache = os.path.join(", "set() for match in self.opts['mine_get']: if re.match(match, load['id']): if isinstance(self.opts['mine_get'][match],", "load['ret'], 'id': load['id'], } if 'tgt_type' in load: if load['tgt_type'].startswith('node'):", "self.opts['external_auth'][extra['eauth']]) | ('*' in self.opts['external_auth'][extra['eauth']])): log.warning( 'Authentication failure of type", "False def check_autosign_dir(self, keyid): ''' Check a keyid for membership", "# save the load, since we don't have it saveload_fstr", "load: pub_load['tgt_type'] = load['tgt_type'] if 'to' in load: pub_load['to'] =", "OSError: return False return True def _file_recv(self, load): ''' Allows", "good = False break # check if the cmd is", "minion ids def init_git_pillar(opts): ''' Clear out the ext pillar", "not in load for key in ('fun', 'arg', 'id')): return", "to be matched to salt functions, so the minions can", "return True return False def check_signing_file(self, keyid, signing_file): ''' Check", "while authenticating: {0}'.format(exc) ) return '' good = self.ckminions.auth_check( self.opts['external_auth'][extra['eauth']][name]", "salt functions The config will look like this: peer: .*:", "'publish_auth') if not os.path.exists(auth_cache): return else: for (dirpath, dirnames, filenames)", "Clear remote fileserver backend caches so they get recreated for", "loaded, the master will not be able to ' 'serve", "return False pillar = salt.pillar.Pillar( self.opts, load['grains'], load['id'], load.get('saltenv', load.get('env')),", "''' # TODO: better way to not require creating the", "= self.opts['nodegroups'][load['tgt']] pub_load['expr_form_type'] = 'compound' else: return {} else: pub_load['expr_form']", "salt.key import salt.fileserver import salt.utils.atomicfile import salt.utils.event import salt.utils.verify import", "stat.S_IWOTH & fmode.st_mode: # don't allow others to write to", "& fmode.st_mode or stat.S_IWOTH & fmode.st_mode): return True return False", "= 'Wrong permissions for {0}, ignoring content' log.warn(message.format(signing_file)) return False", "'arg', 'tgt', 'ret', 'id')): return False # If the command", "'root')): log.warning( 'Authentication failure of type \"user\" occurred.' ) return", "masters (via a syndic), don't short circuit if no minions", "save_load function!'.format( self.opts['ext_job_cache'] ) ) except Exception: log.critical( 'The specified", "the needed authentication creds. ''' if 'eauth' not in load:", "local file objects from the file server interface ''' fs_", "!= self.key[load['user']]: log.warning( 'Authentication failure of type \"user\" occurred.' )", "the minions can only publish allowed salt functions The config", "job cache ''' # TODO: better way to not require", "permissions for {0}, ignoring content' log.warn(message.format(signing_file)) return False with salt.utils.fopen(signing_file,", "in self.opts['external_auth']: # The eauth system is not enabled, fail", "involves preparing the three listeners and the workers needed by", "masterminion every time? 
mminion = salt.minion.MasterMinion( opts, states=False, rend=False, )", "filename has correct permissions ''' if salt.utils.is_windows(): return True #", "False fmode = os.stat(filename) if os.getuid() == 0: if fmode.st_uid", "autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign') # cleanup expired files expire_minutes =", "of its own mine contents ''' if not skip_verify and", "self.opts['external_auth'][token['eauth']]) | ('*' in self.opts['external_auth'][token['eauth']])): log.warning('Authentication failure of type \"token\"", "fun = load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.async(fun, load.get('kwarg', {}),", "the contents of the publish load is serious!! Changes here", "made available to minions, this class includes the raw routines", "from a minion, return the runner's function data ''' if", "try: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load) except KeyError: log.critical( 'The", "def __init__(self, opts): self.opts = opts self.event = salt.utils.event.get_event( 'master',", "'ab' else: mode = 'wb' with salt.utils.fopen(cpath, mode) as fp_:", "except OSError: return False return True def _mine_flush(self, load, skip_verify=False):", "salt.payload.Serial(opts) self.ckminions = salt.utils.minions.CkMinions(opts) # Create the tops dict for", "return {} if 'mine_get' in self.opts: # If master side", "with salt.utils.fopen(datap, 'rb') as fp_: mine_data = self.serial.load(fp_) if isinstance(mine_data,", "__init__(self, opts): self.opts = opts self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'],", "self.opts.get('master_tops', {}): continue try: ret.update(self.tops[fun](opts=opts, grains=grains)) except Exception as exc:", "= minion['jid'] for key, val in six.iteritems(self.local.get_cache_returns(ret['__jid__'])): if key not", "return runner_client.async( fun, load.get('kwarg', {}), token['name']) except Exception as exc:", "minion/master versions and even tiny # additions can have serious", "import salt.utils.event import salt.utils.verify import salt.utils.minions import salt.utils.gzip_util import salt.utils.jid", "specified returner used for the master job cache ' '\"{0}\"", "event bus self.event.fire_event(new_job_load, 'new_job') # old dup event self.event.fire_event(new_job_load, tagify([load['jid'],", "'Authentication failure of type \"user\" occurred.' 
) return '' elif", "opts = {'fun': load['fun'], 'arg': load['arg'], 'id': load['id'], 'doc': False,", "check_autoreject(self, keyid): ''' Checks if the specified keyid should automatically", "init_git_pillar(opts): ''' Clear out the ext pillar caches, used when", ") def clean_expired_tokens(opts): ''' Clean expired tokens from the master", "load['fun']) if not good: msg = ('Authentication failure of type", "# Evaluate all configured master_tops interfaces opts = {} grains", "jid_fn = os.path.join(auth_cache, str(ret['jid'])) with salt.utils.fopen(jid_fn, 'w+') as fp_: fp_.write(load['id'])", "the master Minion to access returner data self.mminion = salt.minion.MasterMinion(", "mopts mopts['renderer'] = self.opts['renderer'] mopts['failhard'] = self.opts['failhard'] mopts['state_top'] = self.opts['state_top']", "token = self.loadauth.get_tok(load['token']) except Exception as exc: msg = 'Exception", "failure of type \"token\" occurred for ' 'user {0}.').format(token['name']) log.warning(msg)", "except Exception as exc: log.error(exc) log.error('Exception occurred while ' 'introspecting", "'Preparing the {0} key for local communication'.format( user ) )", "from its own mine ''' if 'id' not in load", "pwd.getpwnam(user) uid = pwnam[2] gid = pwnam[3] groups = salt.utils.get_gid_list(user,", "return False # If the command will make a recursive", "pillar = salt.pillar.Pillar( self.opts, load['grains'], load['id'], load.get('saltenv', load.get('env')), load.get('ext'), self.mminion.functions,", "ret def minion_publish(self, load): ''' Publish a command initiated from", "Create and return an authentication token, the clear load needs", "if good is False: log.error( '{user} does not have permissions", "no minions if not minions: return { 'enc': 'clear', 'load':", "as fp_: fp_.write(load['id']) return ret def minion_publish(self, load): ''' Publish", "to parse timeout value: {0}'.format( load['timeout']) log.warn(msg) return {} if", "failure of type \"token\" occurred.' 
log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) good", ") ) return pillargitfs def clean_fsbackend(opts): ''' Clean out the", "return {'tag': tag, 'data': data} except Exception as exc: log.error(", "Things to do in lower layers: # only accept valid", "to execute ''' # Verify that the load is valid", "file_roots[saltenv] = [] mopts['file_roots'] = file_roots if load.get('env_only'): return mopts", "circuit if no minions # are found if not self.opts.get('order_masters'):", "LocalFuncs(object): ''' Set up methods for use only from the", "on subsequent runs, if the file # exists, it needs", "salt.utils.fopen(jid_fn, 'r') as fp_: if not load['id'] == fp_.read(): return", "writable by group or other if not (stat.S_IWGRP & fmode.st_mode", "in os.walk(autosign_dir): for f in filenames: stub_file = os.path.join(autosign_dir, f)", "# _auth def __init__(self, opts, key): self.opts = opts self.serial", "Please ' 'contact your local administrator if you believe this", "make sure we don't step on anyone else's toes del", "return dict(error=dict(name='EauthAuthenticationError', message=msg)) if not self.loadauth.time_auth(load): msg = ('Authentication failure", "cache try: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load) except KeyError: log.critical(", "minions}, load['jid']) new_job_load = { 'jid': load['jid'], 'tgt_type': load['tgt_type'], 'tgt':", "Retrieve the jid if not load['jid']: fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid']", "for the external job cache ' '\"{0}\" does not have", "False if os.path.isabs(load['path']) or '../' in load['path']: # Can overwrite", "not in ret: ret[key] = val if load.get('form', '') !=", "''' # All wheel ops pass through eauth if 'token'", "minions can only publish allowed salt functions The config will", "self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun'], load['tgt'], load.get('tgt_type',", "load['id']) if not os.path.isdir(cdir): os.makedirs(cdir) datap = os.path.join(cdir, 'data.p') tmpfh,", "was passed, check it try: token = self.loadauth.get_tok(extra['token']) except Exception", "opts self.serial = salt.payload.Serial(opts) self.key = key # Create the", "os.path.exists(signing_file): return False if not self.check_permissions(signing_file): message = 'Wrong permissions", "continue return ret def _mine(self, load, skip_verify=False): ''' Return the", "cpath = os.path.join( self.opts['cachedir'], 'minions', load['id'], 'files', normpath) cdir =", "exc ) ) return '' if not token: log.warning('Authentication failure", "extra['eauth'] not in self.opts['external_auth']: # The eauth system is not", "log.info( 'User {user} Published command {fun} with jid {jid}'.format( **load", "{id} for job {jid}'.format(**load)) self.event.fire_event(load, load['jid']) # old dup event", "salt.client.get_local_client(mopts=self.opts) # Create the master minion to access the external", "salt.utils.fopen(datap, 'w+b') as fp_: fp_.write(self.serial.dumps(load['data'])) return True def _mine_delete(self, load):", "the file return False # check group flags if self.opts.get('permissive_pki_access',", "fun, load.get('kwarg', {}), token['name']) except Exception as exc: log.error('Exception occurred", "clean_fsbackend(opts): ''' Clean out the old fileserver backends ''' #", "True return False def check_autosign_dir(self, keyid): ''' 
Check a keyid", "for key in ('fun', 'arg', 'tgt', 'ret', 'id')): return False", "try: os.remove(env_cache) except OSError as exc: log.critical( 'Unable to clear", "if os.getuid() == 0: if fmode.st_uid == uid or fmode.st_gid", "access the external job cache self.mminion = salt.minion.MasterMinion( self.opts, states=False,", "log.error( 'Exception {0} occurred in file server update'.format(exc), exc_info_on_loglevel=logging.DEBUG )", "log.critical( 'The specified returner threw a stack trace:\\n', exc_info=True )", "in self.opts: return {} if not isinstance(self.opts['peer_run'], dict): return {}", "{jid}'.format( **load ) ) pub_load['user'] = load['user'] else: log.info( 'Published", "determine groups for user {0}. The user is not '", "object be passed in ''' try: if not fileserver.servers: log.error(", "load['id']) if not os.path.isdir(cdir): return False datap = os.path.join(cdir, 'mine.p')", "mine ''' if 'id' not in load or 'fun' not", "file_roots: file_roots[saltenv] = [] mopts['file_roots'] = file_roots if load.get('env_only'): return", "the event manager self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts,", "a master server, this involves preparing the three listeners and", "{'fun': \"wheel.{0}\".format(fun), 'jid': jid, 'tag': tag, 'user': load.get('username', 'UNKNOWN')} try:", ") raise SaltMasterError('No fileserver backends available') fileserver.update() except Exception as", "master side acl defined. if not isinstance(self.opts['mine_get'], dict): return {}", "in load and ('tag' not in load or 'data' not", "Can overwrite master files!! return False if not salt.utils.verify.valid_id(self.opts, load['id']):", "return '' if load['eauth'] not in self.opts['external_auth']: # The eauth", "'\"{0}\" does not have a save_load function!'.format( self.opts['master_job_cache'] ) )", "log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if token['eauth'] not in self.opts['external_auth']: msg", "failure of type \"user\" occurred.' ) return '' else: if", "load['fun'] = load['fun'].split(',') arg_ = [] for arg in load['arg']:", "os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): os.makedirs(cdir) datap = os.path.join(cdir,", "self.opts: return {} if not isinstance(self.opts['peer_run'], dict): return {} if", "& fmode.st_mode: return True elif stat.S_IWGRP & fmode.st_mode: return False", ") return ret def _mine_get(self, load, skip_verify=False): ''' Gathers the", "True if not good: # The minion is not who", ") return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) def mk_token(self, load): ''' Create", "ret[key] = val if load.get('form', '') != 'full': ret.pop('__jid__') return", "directory. 
''' autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign') # cleanup expired files", "here # break compatibility with minion/master versions and even tiny", ") file_lists_dir = os.path.join( opts['cachedir'], 'file_lists', '{0}fs'.format(backend) ) try: file_lists_caches", "executed in # the clear: # publish (The publish from", "else: if salt.utils.expr_match(keyid, line): return True return False def check_autosign_dir(self,", "'eauth' not in load: log.warning('Authentication failure of type \"eauth\" occurred.')", "the minion ''' mopts = {} file_roots = {} envs", "{0}'.format(keyfile)) os.unlink(keyfile) key = salt.crypt.Crypticle.generate_key_string() cumask = os.umask(191) with salt.utils.fopen(keyfile,", "a save_load function!'.format( self.opts['ext_job_cache'] ) ) except Exception: log.critical( 'The", "for (dirpath, dirnames, filenames) in os.walk(auth_cache): for auth_file in filenames:", "elif 'user' in load: if load['user'].startswith('sudo_'): # If someone can", "user ) ) return False fmode = os.stat(filename) if os.getuid()", "for user in pwd.getpwall(): users.append(user.pw_name) for user in acl_users: log.info(", "('fun', 'arg', 'tgt', 'ret', 'id')): return False # If the", "keyfile: {0}'.format(keyfile)) os.unlink(keyfile) key = salt.crypt.Crypticle.generate_key_string() cumask = os.umask(191) with", "the file server interface ''' fs_ = salt.fileserver.Fileserver(self.opts) self._serve_file =", "failure of type \"user\" ' 'occurred.' ) return '' else:", "opts=self.opts, listen=False) self.serial = salt.payload.Serial(opts) self.ckminions = salt.utils.minions.CkMinions(opts) # Create", "Return the master options to the minion ''' mopts =", "data = {'fun': \"wheel.{0}\".format(fun), 'jid': jid, 'tag': tag, 'user': token['name']}", "work if it is enabled in the config. The configuration", "= checker.check_minions( load['tgt'], match_type, greedy=False ) for minion in minions:", "interfaces opts = {} grains = {} ret = {}", "log.error('Unable to delete pub auth file') def clean_old_jobs(opts): ''' Clean", "def runner(self, load): ''' Send a master control function back", "load needs to contain the eauth key and the needed", "master will not be able to ' 'serve files to", "return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False file_recv_max_size =", "load['tgt'], 'ret': load['ret'], 'id': load['id'], } if 'tgt_type' in load:", "self.opts['nodegroups']: pub_load['tgt'] = self.opts['nodegroups'][load['tgt']] pub_load['expr_form_type'] = 'compound' pub_load['expr_form'] = load['tgt_type']", "a token or False if the token is invalid '''", "self.opts['state_auto_order'] mopts['state_events'] = self.opts['state_events'] mopts['state_aggregate'] = self.opts['state_aggregate'] mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']", "runner object opts = {'fun': load['fun'], 'arg': load['arg'], 'id': load['id'],", "of type \"user\" occurred.' 
) return '' if load['user'] not", "load['id'], 'doc': False, 'conf_file': self.opts['conf_file']} opts.update(self.opts) runner = salt.runner.Runner(opts) return", "acl_users = set(opts['client_acl'].keys()) if opts.get('user'): acl_users.add(opts['user']) acl_users.add(salt.utils.get_user()) if HAS_PWD: for", "min_time: log.warn('Autosign keyid expired {0}'.format(stub_file)) os.remove(stub_file) stub_file = os.path.join(autosign_dir, keyid)", "can be executed in # the clear: # publish (The", "for ' 'user {0}.').format(token['name']) log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) try: fun", "from {id} for job {jid}'.format(**load)) self.event.fire_event(load, load['jid']) # old dup", "self.loadauth.time_auth(extra): log.warning( 'Authentication failure of type \"eauth\" occurred.' ) return", "mopts['state_top'] = self.opts['state_top'] mopts['nodegroups'] = self.opts['nodegroups'] mopts['state_auto_order'] = self.opts['state_auto_order'] mopts['state_events']", "{function}. Please ' 'contact your local administrator if you believe", "self.key[salt.utils.get_user()]: log.warning( 'Authentication failure of type \"other\" occurred.' ) return", "libs import salt.ext.six as six try: import pwd HAS_PWD =", "< time.time(): try: os.remove(token_path) except (IOError, OSError): pass def clean_pub_auth(opts):", "if not good: return False return True def _master_opts(self, load):", "the minions, it can only be used by the LocalClient.", "if 'id' in extra: pub_load['id'] = extra['id'] if 'tgt_type' in", "log.warning( 'Authentication failure of type \"other\" occurred.' ) return ''", "= 'compound' pub_load['expr_form'] = load['tgt_type'] else: return {} else: pub_load['expr_form']", "should automatically be rejected. ''' return self.check_signing_file( keyid, self.opts.get('autoreject_file', None)", "# All wheel ops pass through eauth if 'token' in", "if 'tmo' in load: try: pub_load['timeout'] = int(load['tmo']) except ValueError:", "{0}'.format( load['tmo']) log.warn(msg) return {} if 'timeout' in load: try:", "in self.key: # User is authorised, check key and check", "import salt.ext.six as six try: import pwd HAS_PWD = True", "'kwargs' in load: if 'ret_config' in load['kwargs']: pub_load['ret_config'] = load['kwargs'].get('ret_config')", "if load['jid'] == 'req': # The minion is returning a", "dup event self.event.fire_event(load, tagify([load['jid'], 'ret', load['id']], 'job')) self.event.fire_ret_load(load) if not", "or fmode.st_gid != gid: return True elif self.opts.get('permissive_pki_access', False) \\", "the name associated with a token or False if the", "opts.get('user'): acl_users.add(opts['user']) acl_users.add(salt.utils.get_user()) if HAS_PWD: for user in pwd.getpwall(): users.append(user.pw_name)", "br = parts[0] loc = parts[1] except IndexError: log.critical( 'Unable", "self.opts['external_auth'][load['eauth']])): msg = ('Authentication failure of type \"eauth\" occurred for", "fp_: for line in fp_: line = line.strip() if line.startswith('#'):", "return {} else: pub_load['expr_form'] = load['tgt_type'] ret = {} ret['jid']", "= self.ckminions.auth_check( self.opts['external_auth'][extra['eauth']][name] if name in self.opts['external_auth'][extra['eauth']] else self.opts['external_auth'][extra['eauth']]['*'], load['fun'],", "dict(error=dict(name='TokenAuthenticationError', message=msg)) good = self.ckminions.runner_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']]", "= 
os.path.join(opts['cachedir'], 'publish_auth') if not os.path.exists(auth_cache): return else: for (dirpath,", "jid if not load['jid']: fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[fstr](nocache=extra.get('nocache',", "load: opts = load['opts'] if 'grains' in load['opts']: grains =", "return True elif stat.S_IWGRP & fmode.st_mode: return False # check", "and can therefore not # chown the key file pass", "= salt.pillar.Pillar( self.opts, load['grains'], load['id'], load.get('saltenv', load.get('env')), load.get('ext'), self.mminion.functions, pillar=load.get('pillar_override',", "failure of type \"eauth\" occurred.') return '' return self.loadauth.mk_token(load) except", "functions that can be executed in # the clear: #", "fp_.write(load['id']) return ret def minion_publish(self, load): ''' Publish a command", "load['fun']: # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']] load['fun'] = load['fun'].split(',')", "minion and fire it on the master event interface '''", "if stat.S_IWOTH & fmode.st_mode: # don't allow others to write", "key in ('id', 'grains')): return False pillar = salt.pillar.Pillar( self.opts,", "else: if load['user'] in self.key: # User is authorised, check", "order masters (via a syndic), don't short circuit if no", "load['loc'] != 0: mode = 'ab' else: mode = 'wb'", "self.opts['auto_accept']: return True if self.check_signing_file(keyid, self.opts.get('autosign_file', None)): return True if", "standalone job, request a jobid prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] =", "type \"user\" occurred.' ) return '' good = self.ckminions.auth_check( self.opts['client_acl'][load['user']],", "'Authentication failure of type \"user\" ' 'occurred.' ) return ''", "salt.utils.atomicfile import salt.utils.event import salt.utils.verify import salt.utils.minions import salt.utils.gzip_util import", "not load['jid']: fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[fstr](nocache=extra.get('nocache', False)) self.event.fire_event({'minions':", "to not require creating the masterminion every time? mminion =", "that a built fileserver object be passed in ''' try:", "data = {'fun': \"wheel.{0}\".format(fun), 'jid': jid, 'tag': tag, 'user': load.get('username',", "re.match('publish.*', load['fun']): return False # Check the permissions for this", "''' Update the fileserver backends, requires that a built fileserver", "def access_keys(opts): ''' A key needs to be placed in", "{'ret': { 'jid': load['jid'], 'minions': minions }, 'pub': pub_load }", "type \"eauth\" occurred.') return '' return self.loadauth.mk_token(load) except Exception as", "in load or 'data' not in load): return False if", "'{user} does not have permissions to run {function}. 
Please '", "pillar data: {0}' .format(opts_dict['git']) ) else: pillargitfs.append( git_pillar.GitPillar( br, loc,", "return True elif self.opts.get('permissive_pki_access', False) \\ and fmode.st_gid in groups:", "self.opts.get('user', 'root'): if load.pop('key') != self.key[self.opts.get('user', 'root')]: log.warning( 'Authentication failure", "= salt.payload.Serial(opts) for (dirpath, dirnames, filenames) in os.walk(opts['token_dir']): for token", "fmode.st_mode: return False # check if writable by group or", "os.path.isdir(cdir): try: os.makedirs(cdir) except os.error: pass if os.path.isfile(cpath) and load['loc']", "os.umask(cumask) # 600 octal: Read and write access to the", "= {} envs = self._file_envs() for saltenv in envs: if", "False if not self.opts['file_recv'] or os.path.isabs(load['path']): return False if os.path.isabs(load['path'])", "self.opts['external_auth']) return '' good = self.ckminions.auth_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in", "'ret_config' in load['kwargs']: pub_load['ret_config'] = load['kwargs'].get('ret_config') if 'metadata' in load['kwargs']:", "generating auth token: {0}'.format( exc ) ) return '' if", "# way that won't have a negative impact. pub_load =", "if not self.opts['job_cache'] or self.opts.get('ext_job_cache'): return fstr = '{0}.update_endtime'.format(self.opts['master_job_cache']) if", "'Authentication failure of type \"token\" occurred.' log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg))", "Authentication type of {0} not present.').format(token['eauth']) return '' if not", "self.ckminions.auth_check( self.opts['client_acl'][load['user']], load['fun'], load['tgt'], load.get('tgt_type', 'glob')) if not good: #", ") ) return ret def _mine_get(self, load, skip_verify=False): ''' Gathers", "_minion_event(self, load): ''' Receive an event from the minion and", "the eauth key and the needed authentication creds. ''' if", "'' if not token: log.warning('Authentication failure of type \"token\" occurred.", "we have a load, save it if 'load' in load:", "a negative impact. pub_load = { 'fun': load['fun'], 'arg': load['arg'],", "required to run as root. 
''' users = [] keys", "'' # to make sure we don't step on anyone", "pillar_dirs = {} data = pillar.compile_pillar(pillar_dirs=pillar_dirs) if self.opts.get('minion_data_cache', False): cdir", "dict(error=dict(name='EauthAuthenticationError', message=msg)) good = self.ckminions.runner_check( self.opts['external_auth'][load['eauth']][name] if name in self.opts['external_auth'][load['eauth']]", "passed in ''' try: if not fileserver.servers: log.error( 'No fileservers", "return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) def mk_token(self, load): ''' Create and", "x in opts.get('ext_pillar', [])]: if 'git' in opts_dict: try: import", "# old dup event self.event.fire_event(new_job_load, tagify([load['jid'], 'new'], 'job')) # Save", "opts['keep_jobs']: os.remove(auth_file_path) except (IOError, OSError): log.error('Unable to delete pub auth", "Exception as exc: log.error( 'Exception occurred while authenticating: {0}'.format(exc) )", "load['arg'], 'expr_form': load.get('tgt_type', 'glob'), 'tgt': load['tgt'], 'ret': load['ret'], 'id': load['id'],", ") if os.path.isfile(env_cache): log.debug('Clearing {0}fs env cache'.format(backend)) try: os.remove(env_cache) except", "False): with salt.utils.fopen(datap, 'w+b') as fp_: fp_.write(self.serial.dumps(mine_data)) except OSError: return", "is not who it says it is! # We don't", "job on the event bus self.event.fire_event(new_job_load, 'new_job') # old dup", "salt.ext.six as six try: import pwd HAS_PWD = True except", "{0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if not self.loadauth.time_auth(load): msg", "0) > file_recv_max_size: log.error( 'Exceeding file_recv_max_size limit: {0}'.format( file_recv_max_size )", "as fp_: fdata = self.serial.load(fp_).get(load['fun']) if fdata: ret[minion] = fdata", "if load.pop('key') != self.key[salt.utils.get_user()]: log.warning( 'Authentication failure of type \"other\"", "serious!! 
Changes here # break compatibility with minion/master versions and", "fileserver backend caches so they get recreated for backend in", "= salt.utils.get_gid_list(user, include_default=False) except KeyError: log.error( 'Failed to determine groups", "a jobid prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[prep_fstr](nocache=load.get('nocache', False)) #", "salt.runner.Runner(opts) return runner.run() def pub_ret(self, load, skip_verify=False): ''' Request the", "message=msg)) good = self.ckminions.runner_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else", "to the master job cache try: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'],", "return from {id} for job {jid}'.format(**load)) self.event.fire_event(load, load['jid']) # old", "= {} if 'opts' in load: opts = load['opts'] if", "have a load, save it if 'load' in load: fstr", "message=msg)) if not token: msg = 'Authentication failure of type", "the wheel system: {0}'.format(exc) ) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) def", "remote fileserver backend caches so they get recreated for backend", "run auto key acceptance and rejection ''' def __init__(self, opts):", "perms if load.pop('key') != self.key[load['user']]: log.warning( 'Authentication failure of type", "in load: try: pub_load['timeout'] = int(load['tmo']) except ValueError: msg =", "self.key[load['user']]: log.warning( 'Authentication failure of type \"user\" occurred.' ) return", "minion checker object self.ckminions = salt.utils.minions.CkMinions(opts) # Make an Auth", "_mine_delete(self, load): ''' Allow the minion to delete a specific", "Set up the publication payload pub_load = { 'fun': load['fun'],", "be placed in the filesystem with permissions 0400 so clients", "os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): return False datap =", "backends, requires that a built fileserver object be passed in", "not in load): return False if 'events' in load: for", "in minions: mine = os.path.join( self.opts['cachedir'], 'minions', minion, 'mine.p') try:", "'ret'], 'wheel')) return {'tag': tag, 'data': data} except Exception as", "is False: log.error( '{user} does not have permissions to run", "auth_file) if not os.path.isfile(auth_file_path): continue if os.path.getmtime(auth_file_path) - time.time() >", "\"token\" occurred.' log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if token['eauth'] not in", "has root on master elif 'user' in load: if load['user'].startswith('sudo_'):", "salt.minion.MasterMinion( self.opts, states=False, rend=False) # Make a wheel object self.wheel_", "def minion_pub(self, load): ''' Publish a command initiated from a", "available'.format(user)) continue keyfile = os.path.join( opts['cachedir'], '.{0}_key'.format(user) ) if os.path.exists(keyfile):", "if the destination file exists. salt.utils.atomicfile.atomic_rename(tmpfname, datap) return data def", "None # if we have a load, save it if", "return False with salt.utils.fopen(signing_file, 'r') as fp_: for line in", "fp_.write(load['data']) return True def _pillar(self, load): ''' Return the pillar", "} if 'id' in extra: pub_load['id'] = extra['id'] if 'tgt_type'", "exists. 

def init_git_pillar(opts):
    '''
    Init pillar gitfs. Run when the master starts
    '''
    pillargitfs = []
    for opts_dict in [x for x in opts.get('ext_pillar', [])]:
        if 'git' in opts_dict:
            try:
                import git
            except ImportError:
                return pillargitfs
            parts = opts_dict['git'].strip().split()
            try:
                br = parts[0]
                loc = parts[1]
            except IndexError:
                log.critical(
                    'Unable to extract external pillar data: {0}'
                    .format(opts_dict['git'])
                )
            else:
                pillargitfs.append(
                    git_pillar.GitPillar(
                        br,
                        loc,
                        opts
                    )
                )
    return pillargitfs


def clean_fsbackend(opts):
    '''
    Clean out the old fileserver backends
    '''
    # Clear remote fileserver backend caches so they get recreated
    for backend in ('git', 'hg', 'svn'):
        if backend in opts['fileserver_backend']:
            env_cache = os.path.join(
                opts['cachedir'],
                '{0}fs'.format(backend),
                'envs.p'
            )
            if os.path.isfile(env_cache):
                log.debug('Clearing {0}fs env cache'.format(backend))
                try:
                    os.remove(env_cache)
                except OSError as exc:
                    log.critical(
                        'Unable to clear env cache file {0}: {1}'
                        .format(env_cache, exc)
                    )

            file_lists_dir = os.path.join(
                opts['cachedir'],
                'file_lists',
                '{0}fs'.format(backend)
            )
            try:
                file_lists_caches = os.listdir(file_lists_dir)
            except OSError:
                continue
            for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'):
                cache_file = os.path.join(file_lists_dir, file_lists_cache)
                try:
                    os.remove(cache_file)
                except OSError as exc:
                    log.critical(
                        'Unable to clear file_lists cache file {0}: {1}'
                        .format(cache_file, exc)
                    )
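
# Illustrative sketch (not part of Salt): clean_fsbackend() above selects
# serialized cache entries by extension with fnmatch before deleting them.
# The helper below shows the same selection logic stand-alone; the '*.p'
# pattern and the function name are assumptions made for the example.
def _example_list_cache_files(cache_dir):
    import os
    import fnmatch
    try:
        entries = os.listdir(cache_dir)
    except OSError:
        # Missing cache dir simply means there is nothing to clean
        return []
    return [os.path.join(cache_dir, name)
            for name in fnmatch.filter(entries, '*.p')]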

def clean_expired_tokens(opts):
    '''
    Clean expired tokens from the master
    '''
    serializer = salt.payload.Serial(opts)
    for (dirpath, dirnames, filenames) in os.walk(opts['token_dir']):
        for token in filenames:
            token_path = os.path.join(dirpath, token)
            with salt.utils.fopen(token_path) as token_file:
                token_data = serializer.loads(token_file.read())
                if token_data.get('expire', 0) < time.time():
                    os.remove(token_path)


def clean_pub_auth(opts):
    try:
        auth_cache = os.path.join(opts['cachedir'], 'publish_auth')
        if not os.path.exists(auth_cache):
            return
        for (dirpath, dirnames, filenames) in os.walk(auth_cache):
            for auth_file in filenames:
                auth_file_path = os.path.join(dirpath, auth_file)
                if not os.path.isfile(auth_file_path):
                    continue
                if os.path.getmtime(auth_file_path) - time.time() > opts['keep_jobs']:
                    os.remove(auth_file_path)
    except (IOError, OSError):
        log.error('Unable to delete pub auth file')


def clean_old_jobs(opts):
    '''
    Clean out the old jobs from the job cache
    '''
    # TODO: better way to not require creating the masterminion every time?
    mminion = salt.minion.MasterMinion(
        opts,
        states=False,
        rend=False)
    # If the master job cache has a clean_old_jobs, call it
    fstr = '{0}.clean_old_jobs'.format(opts['master_job_cache'])
    if fstr in mminion.returners:
        mminion.returners[fstr]()
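
# Illustrative sketch (not part of Salt): clean_expired_tokens() walks a
# directory and removes entries whose stored 'expire' timestamp has passed.
# The same pattern with the stdlib only; using pickle as the serializer and
# a dict with an 'expire' key is an assumption made for the example.
def _example_purge_expired(token_dir):
    import os
    import time
    import pickle
    for dirpath, _, filenames in os.walk(token_dir):
        for name in filenames:
            path = os.path.join(dirpath, name)
            with open(path, 'rb') as fp_:
                data = pickle.load(fp_)
            # Remove tokens with no expiry or an expiry in the past
            if data.get('expire', 0) < time.time():
                os.remove(path)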

def access_keys(opts):
    '''
    A key needs to be placed in the filesystem with permissions 0400 so
    clients are required to run as root.
    '''
    users = []
    keys = {}
    acl_users = set(opts['client_acl'].keys())
    if opts.get('user'):
        acl_users.add(opts['user'])
    acl_users.add(salt.utils.get_user())
    if HAS_PWD:
        for user in pwd.getpwall():
            users.append(user.pw_name)
    for user in acl_users:
        log.info(
            'Preparing the {0} key for local communication'.format(
                user
            )
        )
        if HAS_PWD:
            if user not in users:
                try:
                    user = pwd.getpwnam(user).pw_name
                except KeyError:
                    log.error('ACL user {0} is not available'.format(user))
                    continue
        keyfile = os.path.join(
            opts['cachedir'], '.{0}_key'.format(user)
        )

        if os.path.exists(keyfile):
            log.debug('Removing stale keyfile: {0}'.format(keyfile))
            os.unlink(keyfile)

        key = salt.crypt.Crypticle.generate_key_string()
        cumask = os.umask(191)
        with salt.utils.fopen(keyfile, 'w+') as fp_:
            fp_.write(key)
        os.umask(cumask)
        # 600 octal: Read and write access to the owner only.
        # Write access is necessary since on subsequent runs, if the file
        # exists, it needs to be written to again. Windows enforces this.
        os.chmod(keyfile, 0o600)
        if HAS_PWD:
            try:
                os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1)
            except OSError:
                # The master is not being run as root and can therefore not
                # chown the key file
                pass
        keys[user] = key
    return keys


def fileserver_update(fileserver):
    '''
    Update the fileserver backends, requires that a built fileserver object
    be passed in
    '''
    try:
        if not fileserver.servers:
            log.error(
                'No fileservers loaded, the master will not be able to '
                'serve files to minions'
            )
            raise SaltMasterError('No fileserver backends available')
        fileserver.update()
    except Exception as exc:
        log.error(
            'Exception {0} occurred in file server update'.format(exc),
            exc_info_on_loglevel=logging.DEBUG
        )
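
# Illustrative sketch (not part of Salt): access_keys() relies on setting a
# restrictive umask *before* the key file is created, so the secret never
# exists with loose permissions, then tightening to 0600 afterwards. The
# same pattern with the stdlib only; path and function name are invented:
def _example_write_secret(path, secret):
    import os
    old_umask = os.umask(0o277)  # 191 decimal == 0o277: strip group/other bits
    try:
        with open(path, 'w') as fp_:
            fp_.write(secret)
    finally:
        os.umask(old_umask)
    # 600 octal: read/write for the owner only; write access is kept so the
    # file can be rewritten on subsequent runs
    os.chmod(path, 0o600)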

class AutoKey(object):
    '''
    Implement the methods to run auto key acceptance and rejection
    '''
    def __init__(self, opts):
        self.opts = opts

    def check_permissions(self, filename):
        '''
        Check if the specified filename has correct permissions
        '''
        if salt.utils.is_windows():
            return True

        # After we've ascertained we're not on windows
        try:
            user = self.opts['user']
            pwnam = pwd.getpwnam(user)
            uid = pwnam[2]
            gid = pwnam[3]
            groups = salt.utils.get_gid_list(user, include_default=False)
        except KeyError:
            log.error(
                'Failed to determine groups for user {0}. The user is not '
                'available.\n'.format(
                    user
                )
            )
            return False

        fmode = os.stat(filename)

        if os.getuid() == 0:
            if fmode.st_uid == uid or fmode.st_gid != gid:
                return True
            elif self.opts.get('permissive_pki_access', False) \
                    and fmode.st_gid in groups:
                return True
        else:
            if stat.S_IWOTH & fmode.st_mode:
                # don't allow others to write to the file
                return False

            # check if writable by group or other
            if not (stat.S_IWGRP & fmode.st_mode or
                    stat.S_IWOTH & fmode.st_mode):
                return True

        return False

    def check_signing_file(self, keyid, signing_file):
        '''
        Check a keyid for membership in a signing file
        '''
        if not signing_file or not os.path.exists(signing_file):
            return False

        if not self.check_permissions(signing_file):
            log.warn('Wrong permissions for {0}, ignoring content'.format(
                signing_file))
            return False

        with salt.utils.fopen(signing_file, 'r') as fp_:
            for line in fp_:
                line = line.strip()
                if line.startswith('#'):
                    continue
                else:
                    if salt.utils.expr_match(keyid, line):
                        return True
        return False

    def check_autosign_dir(self, keyid):
        '''
        Check a keyid for membership in a autosign directory.
        '''
        autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign')

        # cleanup expired files
        expire_minutes = self.opts.get('autosign_timeout', 120)
        if expire_minutes > 0:
            min_time = time.time() - (60 * int(expire_minutes))
            for root, dirs, filenames in os.walk(autosign_dir):
                for f in filenames:
                    stub_file = os.path.join(autosign_dir, f)
                    mtime = os.path.getmtime(stub_file)
                    if mtime < min_time:
                        log.warn('Autosign keyid expired {0}'.format(stub_file))
                        os.remove(stub_file)

        stub_file = os.path.join(autosign_dir, keyid)
        if not os.path.exists(stub_file):
            return False
        os.remove(stub_file)
        return True

    def check_autoreject(self, keyid):
        '''
        Checks if the specified keyid should automatically be rejected.
        '''
        return self.check_signing_file(
            keyid,
            self.opts.get('autoreject_file', None)
        )

    def check_autosign(self, keyid):
        '''
        Checks if the specified keyid should automatically be signed.
        '''
        if self.opts['auto_accept']:
            return True
        if self.check_signing_file(keyid, self.opts.get('autosign_file', None)):
            return True
        if self.check_autosign_dir(keyid):
            return True
        return False
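
# Illustrative sketch (not part of Salt): AutoKey.check_permissions() above
# rejects signing files that are writable by group or other when not running
# as root. A minimal stand-alone version of that mode test; the function
# name is invented for the example:
def _example_is_safe_mode(path):
    import os
    import stat
    mode = os.stat(path).st_mode
    # writable by "other" is always unsafe
    if mode & stat.S_IWOTH:
        return False
    # safe only when neither group nor other can write
    return not (mode & stat.S_IWGRP)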

class RemoteFuncs(object):
    '''
    Functions made available to minions, this class includes the raw routines
    post validation that make up the minion access to the master
    '''
    def __init__(self, opts):
        self.opts = opts
        self.event = salt.utils.event.get_event(
            'master',
            self.opts['sock_dir'],
            self.opts['transport'],
            opts=self.opts,
            listen=False)
        self.serial = salt.payload.Serial(opts)
        self.ckminions = salt.utils.minions.CkMinions(opts)
        # Create the tops dict for loading external top data
        self.tops = salt.loader.tops(self.opts)
        # Make a client
        self.local = salt.client.get_local_client(mopts=self.opts)
        # Create the master minion to access the external job cache
        self.mminion = salt.minion.MasterMinion(
            self.opts,
            states=False,
            rend=False)
        self.__setup_fileserver()

    def __setup_fileserver(self):
        '''
        Set the local file objects from the file server interface
        '''
        fs_ = salt.fileserver.Fileserver(self.opts)
        self._serve_file = fs_.serve_file
        self._file_hash = fs_.file_hash
        self._file_list = fs_.file_list
        self._file_list_emptydirs = fs_.file_list_emptydirs
        self._dir_list = fs_.dir_list
        self._symlink_list = fs_.symlink_list
        self._file_envs = fs_.envs

    def __verify_minion_publish(self, load):
        '''
        Verify that the passed information authorized a minion to execute
        '''
        # Verify that the load is valid
        if 'peer' not in self.opts:
            return False
        if not isinstance(self.opts['peer'], dict):
            return False
        if any(key not in load for key in ('fun', 'arg', 'tgt', 'ret', 'id')):
            return False
        # If the command will make a recursive publish don't run
        if re.match('publish.*', load['fun']):
            return False
        # Check the permissions for this minion
        perms = []
        for match in self.opts['peer']:
            if re.match(match, load['id']):
                # This is the list of funcs/modules!
                if isinstance(self.opts['peer'][match], list):
                    perms.extend(self.opts['peer'][match])
        if ',' in load['fun']:
            # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
            load['fun'] = load['fun'].split(',')
            arg_ = []
            for arg in load['arg']:
                arg_.append(arg.split())
            load['arg'] = arg_
        good = self.ckminions.auth_check(
            perms,
            load['fun'],
            load['tgt'],
            load.get('tgt_type', 'glob'))
        if not good:
            return False
        return True

    def _ext_nodes(self, load, skip_verify=False):
        '''
        Return the results from an external node classifier if one is
        specified
        '''
        if not skip_verify:
            if 'id' not in load:
                log.error('Received call for external nodes without an id')
                return {}
            if not salt.utils.verify.valid_id(self.opts, load['id']):
                return {}
        # Evaluate all configured master_tops interfaces
        opts = {}
        grains = {}
        ret = {}

        if 'opts' in load:
            opts = load['opts']
            if 'grains' in load['opts']:
                grains = load['opts']['grains']
        for fun in self.tops:
            if fun not in self.opts.get('master_tops', {}):
                continue
            try:
                ret.update(self.tops[fun](opts=opts, grains=grains))
            except Exception as exc:
                # If anything happens in the top generation, log it and
                # move on
                log.error(
                    'Top function {0} failed with error {1} for minion '
                    '{2}'.format(
                        fun, exc, load['id']
                    )
                )
        return ret

    def _master_opts(self, load):
        '''
        Return the master options to the minion
        '''
        mopts = {}
        file_roots = {}
        envs = self._file_envs()
        for saltenv in envs:
            if saltenv not in file_roots:
                file_roots[saltenv] = []
        mopts['file_roots'] = file_roots
        if load.get('env_only'):
            return mopts
        mopts['renderer'] = self.opts['renderer']
        mopts['failhard'] = self.opts['failhard']
        mopts['state_top'] = self.opts['state_top']
        mopts['nodegroups'] = self.opts['nodegroups']
        mopts['state_auto_order'] = self.opts['state_auto_order']
        mopts['state_events'] = self.opts['state_events']
        mopts['state_aggregate'] = self.opts['state_aggregate']
        mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
        mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
        return mopts

    def _mine_get(self, load, skip_verify=False):
        '''
        Gathers the data from the specified minions' mine
        '''
        if any(key not in load for key in ('id', 'tgt', 'fun')):
            return {}
        if 'mine_get' in self.opts:
            # If master side acl defined.
            if not isinstance(self.opts['mine_get'], dict):
                return {}
            perms = set()
            for match in self.opts['mine_get']:
                if re.match(match, load['id']):
                    if isinstance(self.opts['mine_get'][match], list):
                        perms.update(self.opts['mine_get'][match])
            if not any(re.match(perm, load['fun']) for perm in perms):
                return {}
        ret = {}
        if not salt.utils.verify.valid_id(self.opts, load['id']):
            return ret
        match_type = load.get('expr_form', 'glob')
        if match_type.lower() == 'pillar':
            match_type = 'pillar_exact'
        if match_type.lower() == 'compound':
            match_type = 'compound_pillar_exact'
        checker = salt.utils.minions.CkMinions(self.opts)
        minions = checker.check_minions(
            load['tgt'],
            match_type,
            greedy=False
        )
        for minion in minions:
            mine = os.path.join(
                self.opts['cachedir'],
                'minions',
                minion,
                'mine.p')
            try:
                with salt.utils.fopen(mine, 'rb') as fp_:
                    fdata = self.serial.load(fp_).get(load['fun'])
                    if fdata:
                        ret[minion] = fdata
            except (OSError, IOError):
                continue
        return ret

    def _mine(self, load, skip_verify=False):
        '''
        Return the mine data
        '''
        if not skip_verify:
            if 'id' not in load or 'data' not in load:
                return False
        if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
            cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
            if not os.path.isdir(cdir):
                os.makedirs(cdir)
            datap = os.path.join(cdir, 'mine.p')
            if not load.get('clear', False):
                if os.path.isfile(datap):
                    with salt.utils.fopen(datap, 'rb') as fp_:
                        new = self.serial.load(fp_)
                    if isinstance(new, dict):
                        new.update(load['data'])
                        load['data'] = new
            with salt.utils.fopen(datap, 'w+b') as fp_:
                fp_.write(self.serial.dumps(load['data']))
        return True

    def _mine_delete(self, load):
        '''
        Allow the minion to delete a specific function from its own mine
        '''
        if 'id' not in load or 'fun' not in load:
            return False
        if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
            cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
            if not os.path.isdir(cdir):
                return False
            datap = os.path.join(cdir, 'mine.p')
            if os.path.isfile(datap):
                try:
                    with salt.utils.fopen(datap, 'rb') as fp_:
                        mine_data = self.serial.load(fp_)
                    if isinstance(mine_data, dict):
                        if mine_data.pop(load['fun'], False):
                            with salt.utils.fopen(datap, 'w+b') as fp_:
                                fp_.write(self.serial.dumps(mine_data))
                except OSError:
                    return False
        return True

    def _mine_flush(self, load, skip_verify=False):
        '''
        Allow the minion to delete all of its own mine contents
        '''
        if not skip_verify and 'id' not in load:
            return False
        if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
            cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
            if not os.path.isdir(cdir):
                return False
            datap = os.path.join(cdir, 'mine.p')
            if os.path.isfile(datap):
                try:
                    os.remove(datap)
                except OSError:
                    return False
        return True

    def _file_recv(self, load):
        '''
        Allows minions to send files to the master, files are sent to the
        master file cache
        '''
        if any(key not in load for key in ('id', 'path', 'loc')):
            return False
        if not self.opts['file_recv'] or os.path.isabs(load['path']):
            return False
        if os.path.isabs(load['path']) or '../' in load['path']:
            # Can overwrite master files!!
            return False
        if not salt.utils.verify.valid_id(self.opts, load['id']):
            return False
        file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size']

        if 'loc' in load and load['loc'] < 0:
            log.error('Invalid file pointer: load[loc] < 0')
            return False

        if len(load['data']) + load.get('loc', 0) > file_recv_max_size:
            log.error(
                'Exceeding file_recv_max_size limit: {0}'.format(
                    file_recv_max_size
                )
            )
            return False
        # Normalize Windows paths
        normpath = load['path']
        if ':' in normpath:
            # make sure double backslashes are normalized
            normpath = normpath.replace('\\', '/')
            normpath = os.path.normpath(normpath)
        cpath = os.path.join(
            self.opts['cachedir'],
            'minions',
            load['id'],
            'files',
            normpath)
        cdir = os.path.dirname(cpath)
        if not os.path.isdir(cdir):
            try:
                os.makedirs(cdir)
            except os.error:
                pass
        if os.path.isfile(cpath) and load['loc'] != 0:
            mode = 'ab'
        else:
            mode = 'wb'
        with salt.utils.fopen(cpath, mode) as fp_:
            if load['loc']:
                fp_.seek(load['loc'])
            fp_.write(load['data'])
        return True

    def _pillar(self, load):
        '''
        Return the pillar data for the minion
        '''
        if any(key not in load for key in ('id', 'grains')):
            return False
        pillar = salt.pillar.Pillar(
            self.opts,
            load['grains'],
            load['id'],
            load.get('saltenv', load.get('env')),
            load.get('ext'),
            self.mminion.functions,
            pillar=load.get('pillar_override', {}))
        pillar_dirs = {}
        data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
        if self.opts.get('minion_data_cache', False):
            cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
            if not os.path.isdir(cdir):
                os.makedirs(cdir)
            datap = os.path.join(cdir, 'data.p')
            tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
            os.close(tmpfh)
            with salt.utils.fopen(tmpfname, 'w+b') as fp_:
                fp_.write(
                    self.serial.dumps(
                        {'grains': load['grains'],
                         'pillar': data})
                )
            # On Windows, os.rename will fail if the destination file exists.
            salt.utils.atomicfile.atomic_rename(tmpfname, datap)
        return data

    def _minion_event(self, load):
        '''
        Receive an event from the minion and fire it on the master event
        interface
        '''
        if 'id' not in load:
            return False
        if 'events' not in load and ('tag' not in load or 'data' not in load):
            return False
        if 'events' in load:
            for event in load['events']:
                self.event.fire_event(event, event['tag'])  # old dup event
                if load.get('pretag') is not None:
                    if 'data' in event:
                        self.event.fire_event(event['data'],
                                              tagify(event['tag'],
                                                     base=load['pretag']))
                    else:
                        self.event.fire_event(event,
                                              tagify(event['tag'],
                                                     base=load['pretag']))
        else:
            tag = load['tag']
            self.event.fire_event(load, tag)
        return True

    def _return(self, load):
        '''
        Handle the return data sent from the minions
        '''
        # Generate EndTime
        endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid())
        # If the return data is invalid, just ignore it
        if any(key not in load for key in ('return', 'jid', 'id')):
            return False
        if load['jid'] == 'req':
            # The minion is returning a standalone job, request a jobid
            prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
            load['jid'] = self.mminion.returners[prep_fstr](
                nocache=load.get('nocache', False))

            # save the load, since we don't have it
            saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
            self.mminion.returners[saveload_fstr](load['jid'], load)
        log.info('Got return from {id} for job {jid}'.format(**load))
        self.event.fire_event(load, load['jid'])  # old dup event
        self.event.fire_event(load, tagify([load['jid'], 'ret', load['id']], 'job'))
        self.event.fire_ret_load(load)
        if not self.opts['job_cache'] or self.opts.get('ext_job_cache'):
            return

        fstr = '{0}.update_endtime'.format(self.opts['master_job_cache'])
        if (self.opts.get('job_cache_store_endtime')
                and fstr in self.mminion.returners):
            self.mminion.returners[fstr](load['jid'], endtime)

        fstr = '{0}.returner'.format(self.opts['master_job_cache'])
        self.mminion.returners[fstr](load)

    def _syndic_return(self, load):
        '''
        Receive a syndic minion return and format it to look like returns from
        individual minions
        '''
        # Verify the load
        if any(key not in load for key in ('return', 'jid', 'id')):
            return None
        # if we have a load, save it
        if 'load' in load:
            fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
            self.mminion.returners[fstr](load['jid'], load['load'])

        # Format individual return loads
        for key, item in six.iteritems(load['return']):
            ret = {'jid': load['jid'],
                   'id': key,
                   'return': item}
            if 'out' in load:
                ret['out'] = load['out']
            self._return(ret)

    def minion_runner(self, load):
        '''
        Execute a runner from a minion, return the runner's function data
        '''
        if 'peer_run' not in self.opts:
            return {}
        if not isinstance(self.opts['peer_run'], dict):
            return {}
        if any(key not in load for key in ('fun', 'arg', 'id')):
            return {}
        perms = set()
        for match in self.opts['peer_run']:
            if re.match(match, load['id']):
                # This is the list of funcs/modules!
                if isinstance(self.opts['peer_run'][match], list):
                    perms.update(self.opts['peer_run'][match])
        good = False
        for perm in perms:
            if re.match(perm, load['fun']):
                good = True
        if not good:
            # The minion is not who it says it is!
            # We don't want to listen to it!
            log.warn(
                'Minion id {0} is not who it says it is!'.format(
                    load['id']
                )
            )
            return {}
        # Prepare the runner object
        opts = {'fun': load['fun'],
                'arg': load['arg'],
                'id': load['id'],
                'doc': False,
                'conf_file': self.opts['conf_file']}
        opts.update(self.opts)
        runner = salt.runner.Runner(opts)
        return runner.run()

    def pub_ret(self, load, skip_verify=False):
        '''
        Request the return data from a specific jid, only allowed
        if the requesting minion also initiated the publication
        '''
        if not skip_verify and any(key not in load for key in ('jid', 'id')):
            return {}
        else:
            auth_cache = os.path.join(
                self.opts['cachedir'],
                'publish_auth')
            if not os.path.isdir(auth_cache):
                os.makedirs(auth_cache)
            jid_fn = os.path.join(auth_cache, load['jid'])
            with salt.utils.fopen(jid_fn, 'r') as fp_:
                if not load['id'] == fp_.read():
                    return {}
            return self.local.get_cache_returns(load['jid'])

    def minion_pub(self, load):
        '''
        Publish a command initiated from a minion, this method executes minion
        restrictions so that the minion publication will only work if it is
        enabled in the config.

        The configuration on the master allows minions to be matched to
        salt functions, so the minions can only publish allowed salt functions

        The config will look like this:

        peer:
            .*:
                - .*

        This configuration will enable all minions to execute all commands.

        peer:
            foo.example.com:
                - test.*

        This configuration will only allow the minion foo.example.com to
        execute commands from the test module
        '''
        if not self.__verify_minion_publish(load):
            return {}
        # Set up the publication payload
        pub_load = {
            'fun': load['fun'],
            'arg': load['arg'],
            'expr_form': load.get('tgt_type', 'glob'),
            'tgt': load['tgt'],
            'ret': load['ret'],
            'id': load['id'],
        }
        if 'tgt_type' in load:
            if load['tgt_type'].startswith('node'):
                if load['tgt'] in self.opts['nodegroups']:
                    pub_load['tgt'] = self.opts['nodegroups'][load['tgt']]
                    pub_load['expr_form_type'] = 'compound'
                else:
                    return {}
            else:
                pub_load['expr_form'] = load['tgt_type']
        ret = {}
        ret['jid'] = self.local.cmd_async(**pub_load)
        ret['minions'] = self.ckminions.check_minions(
            load['tgt'],
            pub_load['expr_form'])
        auth_cache = os.path.join(
            self.opts['cachedir'],
            'publish_auth')
        if not os.path.isdir(auth_cache):
            os.makedirs(auth_cache)
        jid_fn = os.path.join(auth_cache, str(ret['jid']))
        with salt.utils.fopen(jid_fn, 'w+') as fp_:
            fp_.write(load['id'])
        return ret

    def minion_publish(self, load):
        '''
        Publish a command initiated from a minion, this method executes minion
        restrictions so that the minion publication will only work if it is
        enabled in the config.
        '''
        if not self.__verify_minion_publish(load):
            return {}
        # Set up the publication payload
        pub_load = {
            'fun': load['fun'],
            'arg': load['arg'],
            'expr_form': load.get('tgt_type', 'glob'),
            'tgt': load['tgt'],
            'ret': load['ret'],
            'id': load['id'],
        }
        if 'tmo' in load:
            try:
                pub_load['timeout'] = int(load['tmo'])
            except ValueError:
                msg = 'Failed to parse timeout value: {0}'.format(
                    load['tmo'])
                log.warn(msg)
                return {}
        if 'timeout' in load:
            try:
                pub_load['timeout'] = int(load['timeout'])
            except ValueError:
                msg = 'Failed to parse timeout value: {0}'.format(
                    load['timeout'])
                log.warn(msg)
                return {}
        if 'tgt_type' in load:
            if load['tgt_type'].startswith('node'):
                if load['tgt'] in self.opts['nodegroups']:
                    pub_load['tgt'] = self.opts['nodegroups'][load['tgt']]
                    pub_load['expr_form_type'] = 'compound'
                else:
                    return {}
            else:
                pub_load['expr_form'] = load['tgt_type']
        pub_load['raw'] = True
        ret = {}
        for minion in self.local.cmd_iter(**pub_load):
            if load.get('form', '') == 'full':
                data = minion
                if 'jid' in minion:
                    ret['__jid__'] = minion['jid']
                data['ret'] = data.pop('return')
                ret[minion['id']] = data
            else:
                ret[minion['id']] = minion['return']
                if 'jid' in minion:
                    ret['__jid__'] = minion['jid']
        for key, val in six.iteritems(self.local.get_cache_returns(ret['__jid__'])):
            if key not in ret:
                ret[key] = val
        if load.get('form', '') != 'full':
            ret.pop('__jid__')
        return ret

    def revoke_auth(self, load):
        '''
        Allow a minion to request revocation of its own key
        '''
        if 'id' not in load:
            return False
        keyapi = salt.key.Key(self.opts)
        keyapi.delete_key(load['id'])
        return True
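
# Illustrative sketches (not part of Salt) for two patterns used by
# RemoteFuncs above. First, _file_recv() refuses absolute paths and
# parent-directory escapes before building a cache path; a stand-alone
# version of that guard, with an invented function name:
def _example_safe_cache_path(cache_root, minion_id, rel_path):
    import os
    # Reject absolute paths and '../' escapes up front, mirroring the
    # checks in RemoteFuncs._file_recv()
    if os.path.isabs(rel_path) or '../' in rel_path:
        return None
    normpath = os.path.normpath(rel_path)
    return os.path.join(cache_root, 'minions', minion_id, 'files', normpath)


# Second, _pillar() writes the minion data cache to a temp file in the same
# directory and renames it over the target, so readers never observe a
# half-written file. The same stdlib pattern (note that a plain os.rename
# fails on Windows when the destination exists, which is why Salt uses its
# atomic_rename helper):
def _example_atomic_write(path, payload):
    import os
    import tempfile
    dirname = os.path.dirname(path)
    tmpfh, tmpfname = tempfile.mkstemp(dir=dirname)
    os.close(tmpfh)
    with open(tmpfname, 'wb') as fp_:
        fp_.write(payload)
    # os.rename is atomic on POSIX when source and target share a filesystem
    os.rename(tmpfname, path)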
"load): ''' Return the name associated with a token or", "import salt.minion import salt.search import salt.key import salt.fileserver import salt.utils.atomicfile", "pub_ret(self, load, skip_verify=False): ''' Request the return data from a", "good is False: log.error( '{user} does not have permissions to", "your local administrator if you believe this is in '", "log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) jid = salt.utils.jid.gen_jid() fun = load.pop('fun')", "failure of type \"token\" occurred.' ) return '' load['user'] =", "for match in self.opts['mine_get']: if re.match(match, load['id']): if isinstance(self.opts['mine_get'][match], list):", "not in self.opts['external_auth']: # The eauth system is not enabled,", "os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, str(ret['jid'])) with salt.utils.fopen(jid_fn, 'w+') as", "load is serious!! Changes here # break compatibility with minion/master", "= int(load['tmo']) except ValueError: msg = 'Failed to parse timeout", "listen=False) self.serial = salt.payload.Serial(opts) self.ckminions = salt.utils.minions.CkMinions(opts) # Create the", "= self.opts['state_events'] mopts['state_aggregate'] = self.opts['state_aggregate'] mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks'] mopts['jinja_trim_blocks'] =", "If someone can sudo, allow them to act as root", "file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size'] if 'loc' in load and", "self.opts['external_auth'][load['eauth']]['*'], load['fun']) if not good: msg = ('Authentication failure of", "True self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data}", "its own key ''' if 'id' not in load: return", "the load, since we don't have it saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache'])", "occurred while authenticating: {0}'.format(exc) ) return '' def get_token(self, load):", "self.ckminions.runner_check( self.opts['external_auth'][load['eauth']][name] if name in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][load['eauth']]['*'], load['fun']) if", "routines post validation that make up the minion access to", "so that the minion publication will only work if it", "load['tgt_type'] ret = {} ret['jid'] = self.local.cmd_async(**pub_load) ret['minions'] = self.ckminions.check_minions(", "cache'.format(backend)) try: os.remove(env_cache) except OSError as exc: log.critical( 'Unable to", "type \"eauth\" occurred.' 
) return '' load['user'] = name #", "os.path.isfile(auth_file_path): continue if os.path.getmtime(auth_file_path) - time.time() > opts['keep_jobs']: os.remove(auth_file_path) except", "as exc: log.critical( 'Unable to clear env cache file {0}:", "clean_old_jobs, call it fstr = '{0}.clean_old_jobs'.format(opts['master_job_cache']) if fstr in mminion.returners:", "self.opts = opts def check_permissions(self, filename): ''' Check if the", "check with <NAME> before you even think about # touching", "'' good = self.ckminions.auth_check( self.opts['external_auth'][extra['eauth']][name] if name in self.opts['external_auth'][extra['eauth']] else", "self._symlink_list = fs_.symlink_list self._file_envs = fs_.envs def __verify_minion_publish(self, load): '''", "load['fun'], 'arg': load['arg'], 'minions': minions, } # Announce the job", "__init__(self, opts, key): self.opts = opts self.serial = salt.payload.Serial(opts) self.key", "good = self.ckminions.auth_check( self.opts['external_auth'][extra['eauth']][name] if name in self.opts['external_auth'][extra['eauth']] else self.opts['external_auth'][extra['eauth']]['*'],", "'Exception {0} occurred in file server update'.format(exc), exc_info_on_loglevel=logging.DEBUG ) class", "return {'tag': tag, 'data': data} if 'eauth' not in load:", "toes del good # Check for external auth calls if", "self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun'], load['tgt'], load.get('tgt_type', 'glob')) if not good:", "self.opts['cachedir'], 'minions', load['id'], 'files', normpath) cdir = os.path.dirname(cpath) if not", "datap = os.path.join(cdir, 'mine.p') if os.path.isfile(datap): try: os.remove(datap) except OSError:", "it! log.warn( 'Minion id {0} is not who it says", "''' if 'id' not in load: return False keyapi =", "publish(self, load): ''' This method sends out publications to the", "'id' not in load or 'data' not in load: return", "Send a master control function back to the wheel system", "self.mminion.returners[prep_fstr](nocache=load.get('nocache', False)) # save the load, since we don't have", ") ) return False fmode = os.stat(filename) if os.getuid() ==", "'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: name = self.loadauth.load_name(load) if", "authenticating: {0}'.format(exc) ) return '' good = self.ckminions.auth_check( self.opts['external_auth'][extra['eauth']][name] if", "if 'id' not in load: return False keyapi = salt.key.Key(self.opts)", "exc: # If anything happens in the top generation, log", "'arg': load['arg'], 'expr_form': load.get('tgt_type', 'glob'), 'tgt': load['tgt'], 'ret': load['ret'], 'id':", "them to act as root if load.get('key', 'invalid') == self.key.get('root'):", "self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']]): msg = ('Authentication failure of", "load.get('kwarg', {}), load.get('username', 'UNKNOWN')) except Exception as exc: log.error('Exception occurred", "= opts_dict['git'].strip().split() try: br = parts[0] loc = parts[1] except", "to access returner data self.mminion = salt.minion.MasterMinion( self.opts, states=False, rend=False)", "fp_.write(self.serial.dumps(load['data'])) return True def _mine_delete(self, load): ''' Allow the minion", "starts ''' pillargitfs = [] for opts_dict in [x for", "self.opts['external_auth'][token['eauth']])): log.warning('Authentication failure of type \"token\" occurred. 
\\ Token does", "Allows minions to send files to the master, files are", "data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format( fun,", "serializer = salt.payload.Serial(opts) for (dirpath, dirnames, filenames) in os.walk(opts['token_dir']): for", "for external auth calls if extra.get('token', False): # A token", "else: pub_load['expr_form'] = load['tgt_type'] pub_load['raw'] = True ret = {}", "log.error(exc) log.error('Exception occurred while ' 'introspecting {0}: {1}'.format(fun, exc)) data['return']", "self.serial = salt.payload.Serial(opts) self.ckminions = salt.utils.minions.CkMinions(opts) # Create the tops", "load.get('saltenv', load.get('env')), load.get('ext'), self.mminion.functions, pillar=load.get('pillar_override', {})) pillar_dirs = {} data", "master starts ''' pillargitfs = [] for opts_dict in [x", "external node classifier if one is specified ''' if not", "except OSError: # The master is not being run as", "'data' not in load: return False if self.opts.get('minion_data_cache', False) or", "filenames) in os.walk(opts['token_dir']): for token in filenames: token_path = os.path.join(dirpath,", "os.path.isdir(cdir): return False datap = os.path.join(cdir, 'mine.p') if os.path.isfile(datap): try:", "trace:\\n', exc_info=True ) # always write out to the master", "self.opts['client_acl'][load['user']], load['fun'], load['tgt'], load.get('tgt_type', 'glob')) if not good: # Accept", "in load: if load['tgt_type'].startswith('node'): if load['tgt'] in self.opts['nodegroups']: pub_load['tgt'] =", "'w+b') as fp_: fp_.write(self.serial.dumps(load['data'])) return True def _mine_delete(self, load): '''", "data} except Exception as exc: log.error( 'Exception occurred in the", "occurred.' ) return '' else: if load['user'] in self.key: #", "cache_file = os.path.join(file_lists_dir, file_lists_cache) try: os.remove(cache_file) except OSError as exc:", "or False if the token is invalid ''' if 'token'", "'wheel')) return {'tag': tag, 'data': data} except Exception as exc:", "again. Windows enforces this. 
os.chmod(keyfile, 0o600) if HAS_PWD: try: os.chown(keyfile,", "not in load and ('tag' not in load or 'data'", "**load ) ) log.debug('Published command details {0}'.format(pub_load)) return {'ret': {", "is not being run as root and can therefore not", "'id' in extra: pub_load['id'] = extra['id'] if 'tgt_type' in load:", "return the runner's function data ''' if 'peer_run' not in", "minion_runner(self, load): ''' Execute a runner from a minion, return", "exc) ) file_lists_dir = os.path.join( opts['cachedir'], 'file_lists', '{0}fs'.format(backend) ) try:", "if the specified filename has correct permissions ''' if salt.utils.is_windows():", "and any(key not in load for key in ('jid', 'id')):", "minion in self.local.cmd_iter(**pub_load): if load.get('form', '') == 'full': data =", "tag, 'data': data} except Exception as exc: log.error( 'Exception occurred", "placed in the filesystem with permissions 0400 so clients are", "in load['fun']: # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']] load['fun'] =", "''' if any(key not in load for key in ('id',", "return '' return self.loadauth.mk_token(load) except Exception as exc: log.error( 'Exception", "return '' else: log.warning( 'Authentication failure of type \"user\" occurred.'", "import salt.crypt import salt.utils import salt.client import salt.payload import salt.pillar", "if ':' in normpath: # make sure double backslashes are", ") def check_autosign(self, keyid): ''' Checks if the specified keyid", "self.opts['cachedir'], 'minions', minion, 'mine.p') try: with salt.utils.fopen(mine, 'rb') as fp_:", "if load.get('pretag') is not None: if 'data' in event: self.event.fire_event(event['data'],", "ret = {} for minion in self.local.cmd_iter(**pub_load): if load.get('form', '')", "if HAS_PWD: if user not in users: try: user =", "= self.ckminions.wheel_check( self.opts['external_auth'][load['eauth']][name] if name in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun'])", "tag, 'user': load.get('username', 'UNKNOWN')} try: self.event.fire_event(data, tagify([jid, 'new'], 'wheel')) ret", "won't have a negative impact. pub_load = { 'fun': load['fun'],", "trace:\\n', exc_info=True ) # Altering the contents of the publish", "syndic), don't short circuit if no minions # are found", "'' try: name = self.loadauth.load_name(load) if not ((name in self.opts['external_auth'][load['eauth']])", "= self.opts['failhard'] mopts['state_top'] = self.opts['state_top'] mopts['nodegroups'] = self.opts['nodegroups'] mopts['state_auto_order'] =", "type \"user\" occurred.' ) return '' elif load['user'] == 'root':", "return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) if 'eauth' not in load: msg", "good = False break if good is False: log.error( '{user}", "in self.opts['client_acl_blacklist'].get('modules', []): # if this is a regular command,", "self.mminion.returners[fstr](nocache=extra.get('nocache', False)) self.event.fire_event({'minions': minions}, load['jid']) new_job_load = { 'jid': load['jid'],", "load['arg'], 'minions': minions, } # Announce the job on the", "perms = set() for match in self.opts['peer_run']: if re.match(match, load['id']):", "load['tgt'], load.get('tgt_type', 'glob')) if not good: # Accept find_job so", "the local system ''' # The ClearFuncs object encapsulates the", "'glob'), 'tgt': load['tgt'], 'ret': load['ret'], 'id': load['id'], } if 'tmo'", "Windows, os.rename will fail if the destination file exists. 
salt.utils.atomicfile.atomic_rename(tmpfname,", "if os.path.exists(keyfile): log.debug('Removing stale keyfile: {0}'.format(keyfile)) os.unlink(keyfile) key = salt.crypt.Crypticle.generate_key_string()", "it on the master event interface ''' if 'id' not", "ret data['success'] = True self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag':", "# Set up the publication payload pub_load = { 'fun':", "isinstance(mine_data, dict): if mine_data.pop(load['fun'], False): with salt.utils.fopen(datap, 'w+b') as fp_:", "opts): self.opts = opts self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'],", "the clear: # publish (The publish from the LocalClient) #", "self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, str(ret['jid']))", "# In short, check with <NAME> before you even think", "keyid, self.opts.get('autoreject_file', None) ) def check_autosign(self, keyid): ''' Checks if", "config. The configuration on the master allows minions to be", "better way to not require creating the masterminion every time?", "= load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.async(fun, load.get('kwarg', {}), load.get('username',", "if 'peer_run' not in self.opts: return {} if not isinstance(self.opts['peer_run'],", "if not self.loadauth.time_auth(load): log.warning('Authentication failure of type \"eauth\" occurred.') return", "if not skip_verify and any(key not in load for key", "this minion perms = [] for match in self.opts['peer']: if", "self.serial.dumps( {'grains': load['grains'], 'pillar': data}) ) # On Windows, os.rename", "{0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: name = self.loadauth.load_name(load)", "old jobs from the job cache ''' # TODO: better", ") return '' good = self.ckminions.auth_check( self.opts['external_auth'][extra['eauth']][name] if name in", "if not salt.utils.verify.valid_id(self.opts, load['id']): return {} # Evaluate all configured", "if any(key not in load for key in ('fun', 'arg',", "Check for no minions if not minions: return { 'enc':", "if it is enabled in the config. The configuration on", "'Failed to parse timeout value: {0}'.format( load['tmo']) log.warn(msg) return {}", "if fmode.st_uid == uid or fmode.st_gid != gid: return True", "= {} if not salt.utils.verify.valid_id(self.opts, load['id']): return ret match_type =", "os.makedirs(cdir) except os.error: pass if os.path.isfile(cpath) and load['loc'] != 0:", "any(key not in load for key in ('id', 'tgt', 'fun')):", "command {fun} with jid {jid}'.format( **load ) ) log.debug('Published command", "python libs import fnmatch import logging import os import re", "perms: if re.match(perm, load['fun']): good = True if not good:", "load): ''' Allow the minion to delete a specific function", "self.opts['external_auth'][load['eauth']][name] if name in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun']) if not", "not in file_roots: file_roots[saltenv] = [] mopts['file_roots'] = file_roots if", "{0}'.format( file_recv_max_size ) ) return False # Normalize Windows paths", "message=str(exc))) def wheel(self, load): ''' Send a master control function", "individual minions. ''' # Verify the load if any(key not", "self.opts['external_auth']: log.warning('Authentication failure of type \"token\" occurred. 
\\ Authentication type", "as token_file: token_data = serializer.loads(token_file.read()) if 'expire' not in token_data", "if 'opts' in load: opts = load['opts'] if 'grains' in", ".* This configuration will enable all minions to execute all", "not in load for key in ('id', 'tgt', 'fun')): return", "when the master starts ''' pillargitfs = [] for opts_dict", "if opts.get('user'): acl_users.add(opts['user']) acl_users.add(salt.utils.get_user()) if HAS_PWD: for user in pwd.getpwall():", "'Exceeding file_recv_max_size limit: {0}'.format( file_recv_max_size ) ) return False #", "'tgt', 'ret', 'id')): return False # If the command will", "= salt.fileserver.Fileserver(self.opts) self._serve_file = fs_.serve_file self._file_hash = fs_.file_hash self._file_list =", "data sent from the minions ''' # Generate EndTime endtime", "it fstr = '{0}.clean_old_jobs'.format(opts['master_job_cache']) if fstr in mminion.returners: mminion.returners[fstr]() def", "'/') normpath = os.path.normpath(normpath) cpath = os.path.join( self.opts['cachedir'], 'minions', load['id'],", "= key # Create the event manager self.event = salt.utils.event.get_event(", "self.mminion = salt.minion.MasterMinion( self.opts, states=False, rend=False) # Make a wheel", "'' load['user'] = name # Verify that the caller has", "= {} file_roots = {} envs = self._file_envs() for saltenv", "self._file_list_emptydirs = fs_.file_list_emptydirs self._dir_list = fs_.dir_list self._symlink_list = fs_.symlink_list self._file_envs", "= load['tgt_type'] pub_load['raw'] = True ret = {} for minion", "occurred.' ) return '' # Retrieve the minions list minions", "file_lists cache file {0}: {1}' .format(cache_file, exc) ) def clean_expired_tokens(opts):", "in the wheel system: {0}'.format(exc) ) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc)))", "if mtime < min_time: log.warn('Autosign keyid expired {0}'.format(stub_file)) os.remove(stub_file) stub_file", "else: auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache)", "''' Set up methods for use only from the local", "return '' elif load['user'] == salt.utils.get_user(): if load.pop('key') != self.key.get(load['user']):", "it needs to be written to again. Windows enforces this.", "os.rename will fail if the destination file exists. salt.utils.atomicfile.atomic_rename(tmpfname, datap)", "minions if not minions: return { 'enc': 'clear', 'load': {", "{1}: {2}'.format( fun, exc.__class__.__name__, exc, ) data['success'] = False self.event.fire_event(data,", "Exception: continue return ret def _mine(self, load, skip_verify=False): ''' Return", "the event bus self.event.fire_event(new_job_load, 'new_job') # old dup event self.event.fire_event(new_job_load,", "if the cmd is blacklisted for module_re in self.opts['client_acl_blacklist'].get('modules', []):", "{2}'.format( fun, exc.__class__.__name__, exc, ) data['success'] = False self.event.fire_event(data, tagify([jid,", "mine = os.path.join( self.opts['cachedir'], 'minions', minion, 'mine.p') try: with salt.utils.fopen(mine,", "to execute all commands. peer: foo.example.com: - test.* This configuration", "_master_opts(self, load): ''' Return the master options to the minion", "def __setup_fileserver(self): ''' Set the local file objects from the", "load['arg'] = arg_ good = self.ckminions.auth_check( perms, load['fun'], load['tgt'], load.get('tgt_type'," ]
[ "from __future__ import unicode_literals # pylint: disable=import-only-modules from core.domain import", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "Exception, 'Role TEST_ROLE does not exist.'): role_services.get_all_actions('TEST_ROLE') self.assertEqual( role_services.get_all_actions(feconf.ROLE_ID_GUEST), [role_services.ACTION_PLAY_ANY_PUBLIC_ACTIVITY])", "utf-8 # # Copyright 2017 The Oppia Authors. All Rights", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "All Rights Reserved. # # Licensed under the Apache License,", "2.0 (the \"License\"); # you may not use this file", "file except in compliance with the License. # You may", "agreed to in writing, software # distributed under the License", "Unless required by applicable law or agreed to in writing,", "relating to roles and actions.\"\"\" from __future__ import absolute_import #", "roles and actions.\"\"\" from __future__ import absolute_import # pylint: disable=import-only-modules", "the License is distributed on an \"AS-IS\" BASIS, # WITHOUT", "pylint: disable=import-only-modules from core.domain import role_services from core.tests import test_utils", "= role_services.get_role_actions() self.assertTrue(isinstance(role_actions, dict)) for role_name, allotted_actions in role_actions.items(): self.assertTrue(isinstance(role_name,", "\"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "import python_utils class RolesAndActionsServicesUnitTests(test_utils.GenericTestBase): \"\"\"Tests for roles and actions.\"\"\" def", "The Oppia Authors. All Rights Reserved. # # Licensed under", "allotted_actions: self.assertTrue( isinstance(action_name, python_utils.UNICODE)) def test_get_all_actions(self): with self.assertRaisesRegexp( Exception, 'Role", "role_services.get_role_actions() self.assertTrue(isinstance(role_actions, dict)) for role_name, allotted_actions in role_actions.items(): self.assertTrue(isinstance(role_name, python_utils.UNICODE))", "the specific language governing permissions and # limitations under the", "absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint:", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "from core.tests import test_utils import feconf import python_utils class RolesAndActionsServicesUnitTests(test_utils.GenericTestBase):", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "except in compliance with the License. 
# You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "def test_get_role_actions_return_value_in_correct_schema(self): role_actions = role_services.get_role_actions() self.assertTrue(isinstance(role_actions, dict)) for role_name, allotted_actions", "\"\"\"Tests for roles and actions.\"\"\" def test_get_role_actions_return_value_in_correct_schema(self): role_actions = role_services.get_role_actions()", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "def test_get_all_actions(self): with self.assertRaisesRegexp( Exception, 'Role TEST_ROLE does not exist.'):", "allotted_actions in role_actions.items(): self.assertTrue(isinstance(role_name, python_utils.UNICODE)) self.assertTrue(isinstance(allotted_actions, list)) self.assertEqual(len(set(allotted_actions)), len(allotted_actions)) for", "writing, software # distributed under the License is distributed on", "in writing, software # distributed under the License is distributed", "python_utils.UNICODE)) def test_get_all_actions(self): with self.assertRaisesRegexp( Exception, 'Role TEST_ROLE does not", "you may not use this file except in compliance with", "__future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "# pylint: disable=import-only-modules from core.domain import role_services from core.tests import", "role_actions.items(): self.assertTrue(isinstance(role_name, python_utils.UNICODE)) self.assertTrue(isinstance(allotted_actions, list)) self.assertEqual(len(set(allotted_actions)), len(allotted_actions)) for action_name in", "use this file except in compliance with the License. #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "in role_actions.items(): self.assertTrue(isinstance(role_name, python_utils.UNICODE)) self.assertTrue(isinstance(allotted_actions, list)) self.assertEqual(len(set(allotted_actions)), len(allotted_actions)) for action_name", "permissions and # limitations under the License. \"\"\"Test functions relating", "under the License is distributed on an \"AS-IS\" BASIS, #", "License. \"\"\"Test functions relating to roles and actions.\"\"\" from __future__", "core.tests import test_utils import feconf import python_utils class RolesAndActionsServicesUnitTests(test_utils.GenericTestBase): \"\"\"Tests", "dict)) for role_name, allotted_actions in role_actions.items(): self.assertTrue(isinstance(role_name, python_utils.UNICODE)) self.assertTrue(isinstance(allotted_actions, list))", "CONDITIONS OF ANY KIND, either express or implied. # See", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "Oppia Authors. All Rights Reserved. # # Licensed under the", "on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "# limitations under the License. \"\"\"Test functions relating to roles", "or implied. # See the License for the specific language", "Rights Reserved. # # Licensed under the Apache License, Version", "License. # You may obtain a copy of the License", "functions relating to roles and actions.\"\"\" from __future__ import absolute_import", "License, Version 2.0 (the \"License\"); # you may not use", "from core.domain import role_services from core.tests import test_utils import feconf", "# You may obtain a copy of the License at", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "self.assertEqual(len(set(allotted_actions)), len(allotted_actions)) for action_name in allotted_actions: self.assertTrue( isinstance(action_name, python_utils.UNICODE)) def", "import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals #", "test_get_all_actions(self): with self.assertRaisesRegexp( Exception, 'Role TEST_ROLE does not exist.'): role_services.get_all_actions('TEST_ROLE')", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "role_actions = role_services.get_role_actions() self.assertTrue(isinstance(role_actions, dict)) for role_name, allotted_actions in role_actions.items():", "License for the specific language governing permissions and # limitations", "Authors. All Rights Reserved. # # Licensed under the Apache", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "role_services from core.tests import test_utils import feconf import python_utils class", "Reserved. # # Licensed under the Apache License, Version 2.0", "Copyright 2017 The Oppia Authors. All Rights Reserved. # #", "core.domain import role_services from core.tests import test_utils import feconf import", "under the License. \"\"\"Test functions relating to roles and actions.\"\"\"", "for roles and actions.\"\"\" def test_get_role_actions_return_value_in_correct_schema(self): role_actions = role_services.get_role_actions() self.assertTrue(isinstance(role_actions,", "the License for the specific language governing permissions and #", "role_name, allotted_actions in role_actions.items(): self.assertTrue(isinstance(role_name, python_utils.UNICODE)) self.assertTrue(isinstance(allotted_actions, list)) self.assertEqual(len(set(allotted_actions)), len(allotted_actions))", "(the \"License\"); # you may not use this file except", "<gh_stars>0 # coding: utf-8 # # Copyright 2017 The Oppia", "Apache License, Version 2.0 (the \"License\"); # you may not", "import test_utils import feconf import python_utils class RolesAndActionsServicesUnitTests(test_utils.GenericTestBase): \"\"\"Tests for", "# you may not use this file except in compliance", "either express or implied. # See the License for the", "python_utils.UNICODE)) self.assertTrue(isinstance(allotted_actions, list)) self.assertEqual(len(set(allotted_actions)), len(allotted_actions)) for action_name in allotted_actions: self.assertTrue(", "list)) self.assertEqual(len(set(allotted_actions)), len(allotted_actions)) for action_name in allotted_actions: self.assertTrue( isinstance(action_name, python_utils.UNICODE))", "isinstance(action_name, python_utils.UNICODE)) def test_get_all_actions(self): with self.assertRaisesRegexp( Exception, 'Role TEST_ROLE does", "OR CONDITIONS OF ANY KIND, either express or implied. #", "distributed under the License is distributed on an \"AS-IS\" BASIS,", "to roles and actions.\"\"\" from __future__ import absolute_import # pylint:", "import role_services from core.tests import test_utils import feconf import python_utils", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "limitations under the License. \"\"\"Test functions relating to roles and", "and actions.\"\"\" def test_get_role_actions_return_value_in_correct_schema(self): role_actions = role_services.get_role_actions() self.assertTrue(isinstance(role_actions, dict)) for", "in compliance with the License. 
# You may obtain a", "software # distributed under the License is distributed on an", "import feconf import python_utils class RolesAndActionsServicesUnitTests(test_utils.GenericTestBase): \"\"\"Tests for roles and", "with self.assertRaisesRegexp( Exception, 'Role TEST_ROLE does not exist.'): role_services.get_all_actions('TEST_ROLE') self.assertEqual(", "2017 The Oppia Authors. All Rights Reserved. # # Licensed", "# # Unless required by applicable law or agreed to", "RolesAndActionsServicesUnitTests(test_utils.GenericTestBase): \"\"\"Tests for roles and actions.\"\"\" def test_get_role_actions_return_value_in_correct_schema(self): role_actions =", "distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "# coding: utf-8 # # Copyright 2017 The Oppia Authors.", "# pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules", "in allotted_actions: self.assertTrue( isinstance(action_name, python_utils.UNICODE)) def test_get_all_actions(self): with self.assertRaisesRegexp( Exception,", "Version 2.0 (the \"License\"); # you may not use this", "for role_name, allotted_actions in role_actions.items(): self.assertTrue(isinstance(role_name, python_utils.UNICODE)) self.assertTrue(isinstance(allotted_actions, list)) self.assertEqual(len(set(allotted_actions)),", "law or agreed to in writing, software # distributed under", "# distributed under the License is distributed on an \"AS-IS\"", "self.assertRaisesRegexp( Exception, 'Role TEST_ROLE does not exist.'): role_services.get_all_actions('TEST_ROLE') self.assertEqual( role_services.get_all_actions(feconf.ROLE_ID_GUEST),", "unicode_literals # pylint: disable=import-only-modules from core.domain import role_services from core.tests", "feconf import python_utils class RolesAndActionsServicesUnitTests(test_utils.GenericTestBase): \"\"\"Tests for roles and actions.\"\"\"", "roles and actions.\"\"\" def test_get_role_actions_return_value_in_correct_schema(self): role_actions = role_services.get_role_actions() self.assertTrue(isinstance(role_actions, dict))", "an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "implied. # See the License for the specific language governing", "is distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR", "under the Apache License, Version 2.0 (the \"License\"); # you", "and # limitations under the License. \"\"\"Test functions relating to", "\"License\"); # you may not use this file except in", "test_utils import feconf import python_utils class RolesAndActionsServicesUnitTests(test_utils.GenericTestBase): \"\"\"Tests for roles", "from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import", "governing permissions and # limitations under the License. \"\"\"Test functions", "self.assertTrue(isinstance(role_actions, dict)) for role_name, allotted_actions in role_actions.items(): self.assertTrue(isinstance(role_name, python_utils.UNICODE)) self.assertTrue(isinstance(allotted_actions,", "by applicable law or agreed to in writing, software #", "import unicode_literals # pylint: disable=import-only-modules from core.domain import role_services from", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "the License. 
\"\"\"Test functions relating to roles and actions.\"\"\" from", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules from core.domain", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "to in writing, software # distributed under the License is", "actions.\"\"\" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__", "class RolesAndActionsServicesUnitTests(test_utils.GenericTestBase): \"\"\"Tests for roles and actions.\"\"\" def test_get_role_actions_return_value_in_correct_schema(self): role_actions", "# # Copyright 2017 The Oppia Authors. All Rights Reserved.", "# See the License for the specific language governing permissions", "\"\"\"Test functions relating to roles and actions.\"\"\" from __future__ import", "disable=import-only-modules from core.domain import role_services from core.tests import test_utils import", "test_get_role_actions_return_value_in_correct_schema(self): role_actions = role_services.get_role_actions() self.assertTrue(isinstance(role_actions, dict)) for role_name, allotted_actions in", "self.assertTrue(isinstance(role_name, python_utils.UNICODE)) self.assertTrue(isinstance(allotted_actions, list)) self.assertEqual(len(set(allotted_actions)), len(allotted_actions)) for action_name in allotted_actions:", "You may obtain a copy of the License at #", "python_utils class RolesAndActionsServicesUnitTests(test_utils.GenericTestBase): \"\"\"Tests for roles and actions.\"\"\" def test_get_role_actions_return_value_in_correct_schema(self):", "and actions.\"\"\" from __future__ import absolute_import # pylint: disable=import-only-modules from", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "self.assertTrue( isinstance(action_name, python_utils.UNICODE)) def test_get_all_actions(self): with self.assertRaisesRegexp( Exception, 'Role TEST_ROLE", "required by applicable law or agreed to in writing, software", "pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules from", "for action_name in allotted_actions: self.assertTrue( isinstance(action_name, python_utils.UNICODE)) def test_get_all_actions(self): with", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "self.assertTrue(isinstance(allotted_actions, list)) self.assertEqual(len(set(allotted_actions)), len(allotted_actions)) for action_name in allotted_actions: self.assertTrue( isinstance(action_name,", "actions.\"\"\" def test_get_role_actions_return_value_in_correct_schema(self): role_actions = role_services.get_role_actions() self.assertTrue(isinstance(role_actions, dict)) for role_name,", "with the License. # You may obtain a copy of", "this file except in compliance with the License. 
# You", "License is distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES", "the Apache License, Version 2.0 (the \"License\"); # you may", "__future__ import unicode_literals # pylint: disable=import-only-modules from core.domain import role_services", "# Copyright 2017 The Oppia Authors. All Rights Reserved. #", "language governing permissions and # limitations under the License. \"\"\"Test", "coding: utf-8 # # Copyright 2017 The Oppia Authors. All", "len(allotted_actions)) for action_name in allotted_actions: self.assertTrue( isinstance(action_name, python_utils.UNICODE)) def test_get_all_actions(self):", "action_name in allotted_actions: self.assertTrue( isinstance(action_name, python_utils.UNICODE)) def test_get_all_actions(self): with self.assertRaisesRegexp(" ]
[ "constant(value, dtype=None, shape=None, name=None): if dtype is None: dtype =", "C.sqrt(var(x, axis=axis, keepdims=keepdims)) def expand_dims(x, axis=-1): shape = list(int_shape(x)) nones", "l_s, i_s in zip(last_states, initial_states): if _get_dynamic_axis_num(i_s) == 0 and", "if name is None: name = '' scale = (high", "floatx() for _ in shape: if _ is None: raise", "(bias_dims, dims)) if dims == 4: if data_format == 'channels_first':", "output = repeat_elements(output, width_factor, axis=2) return output else: raise ValueError('CNTK", "not catched in CNTK 2.1 release. # Will update with", "we want to evaluate them.from # But the assign ops", "len(padding) == 2 assert len(padding[0]) == 2 assert len(padding[1]) ==", "= int(num_element / num_static_element) return C.cntk_py.Value( grad_array_view.as_shape( (num_old_batch,) + self.from_shape))", "extra batch axis with 1, it is not needed #", "softplus(x): return C.softplus(x) def softsign(x): return x / (1 +", "return x / norm def hard_sigmoid(x): x = (0.2 *", "workaround for recurrent layer # if n is inferred dimension,", "beta = zeros_like(gamma) mean, variant = _moments(x, _normalize_axis(reduction_axes, x)) if", "a bug in cntk gather op which may cause crash.", "np.float32: return 'float32' elif dtype == np.float64: return 'float64' else:", "reduce_axes) else: if isinstance(axis, list): has_seq = False for a", "== 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0],", "x: print(message))) def batch_set_value(tuples): for t in tuples: x =", "_ in range(ndim)]) dynamic_dimension = C.FreeDimension if _get_cntk_version() >= 2.2", "in reduction_axes: target_shape.append(1) if ndim(gamma) > axis: gamma = C.reduce_mean(gamma,", "- 1) else: target_shape.append(x_shape[axis]) broadcast_mean = C.reshape(mean, target_shape) broadcast_var =", "left_pad = dilation_rate * (kernel.shape[0] - 1) x = temporal_padding(x,", "x = _preprocess_conv2d_input(x, data_format) if pool_mode == 'max': x =", "any parameters in the model. ' 'Please double check how", "padding[0], 0) x = _padding(x, padding[1], 1) x = _padding(x,", "+ 'Expected: None, int, (int, int), ' + 'Provided: '", "Will support it in next release. if not self._is_input_shape_compatible(value, tensor):", "else: return C.stop_gradient(variables) def switch(condition, then_expression, else_expression): ndim_cond = ndim(condition)", "= list(x.shape) new_shape.insert(index, 1) new_shape = tuple(new_shape) x = C.reshape(x,", "pattern=[padding, (0, 0)]) else: x = _padding(x, padding, 0) else:", "feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError('CNTK backend: metrics argument", "== 2 if data_format is None: data_format = image_data_format() if", "return variable(np.eye(size), dtype, name) def zeros_like(x, dtype=None, name=None): return x", "'check the model and inputs.' 
% argument.name) # Some ops", "range(ndim)]) dynamic_dimension = C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension", "+= 1 while i < len(cntk_axis): cntk_axis[i] -= nones i", "ndim(var) == ndim(x) and shape(var)[0] == 1: var = _reshape_dummy_dim(var,", "= [] for constant in constants: if isinstance(constant, list): new_c", "= np.prod(np.asarray(self.from_shape)) num_old_batch = int(num_element / num_static_element) return C.cntk_py.Value( grad_array_view.as_shape(", "padding[2], 3) else: assert len(base_shape) == 5 if hasattr(C, 'pad'):", "def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): if data_format == 'channels_first':", "'reduce_mean') return _remove_dims(output, axis, keepdims) def any(x, axis=None, keepdims=False): reduce_result", "seed=seed), dtype=dtype, name=name) def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): if", "ValueError('CNTK Backend: `eval` method on ' '`%s` type is not", "dtype=floatx()) x.value = value else: raise NotImplementedError def print_tensor(x, message=''):", "detected. ' 'Will using CNTK 2.0 GA as default.') return", "global NAME_SCOPE_STACK NAME_SCOPE_STACK.append(name) yield NAME_SCOPE_STACK.pop() def get_uid(prefix=''): _UID_PREFIXES[prefix] += 1", "len(y_shape) == 2: result = squeeze(result, -1) return result def", "feed_dict[argument] else: raise ValueError( 'CNTK backend: argument %s is not", "dimensions' % (bias_dims, dims)) if dims == 4: if data_format", "col, filters output = permute_dimensions(output, (0, 2, 3, 1)) return", "output = repeat_elements(x, depth_factor, axis=1) output = repeat_elements(output, height_factor, axis=2)", "expand_dims(y) normalized_axis = [] normalized_axis.append(_normalize_axis(axes[0], x)[0]) normalized_axis.append(_normalize_axis(axes[1], y)[0]) # transpose", "= permute_dimensions(output, (0, 2, 3, 1)) return output def reverse(x,", "to any parameters in the model. ' 'Please double check", "shape = tuple([None for _ in range(ndim)]) dynamic_dimension = C.FreeDimension", "if len(outputs) == 0: prev_output = zeros_like(output) else: prev_output =", "cntk, need to remove those dummy axis. if ndim(mean) ==", "# perf issue, will resolve it later with cntk cond", "str(axes)) if len(x_shape) == 2 and len(y_shape) == 2: if", "output_values = self.metrics_func.eval(input_dict, as_numpy=False) if isinstance(output_values, dict): for o in", "x = C.splice(*tmp, axis=i - num_dynamic_axis) i += 1 return", "`data_format`. # CNTK expects `(depth, input_depth, rows, cols)`. kernel =", "batch). so using the workaround # here to mapping the", "is not supported now. ' 'Please provide fixed dimension '", "def _reduce_on_axis(x, axis, reduce_fun_name): if isinstance(axis, list): for a in", "= C.sequence.last(final_output) last_states = [C.sequence.last(s) for s in final_states] if", "kernel_size[0]) slice_col = slice(j * stride_col, j * stride_col +", "is not supported. 
' 'CNTK only supports `eval` with '", "if isinstance(update, tuple): if len(update) != 2: raise NotImplementedError else:", "'channels_first': output = repeat_elements(x, height_factor, axis=2) output = repeat_elements(output, width_factor,", "fixed dimension ' 'instead of `None`.') size *= _ binomial", "' 'instead of `None`.') return random_uniform_variable(shape, minval, maxval, dtype, seed)", "time_axis, i, i + 1) mask_slice = squeeze(mask_slice, 1) if", "if a is not None and a < 0: _axis[i]", "are normalized # on the format `(rows, cols, input_depth, depth)`,", "axis[0] slices = [] shape = x.shape i = 0", "i in range(output_length): slice_length = slice(i * stride, i *", "value not in {0, 1}: raise ValueError('CNTK Backend: Set learning", "[0, 0], list(padding[0]), list(padding[1])]) else: x = _padding(x, padding[0], 2)", "feature_dim))) x_aggregate = concatenate(xs, axis=1) # transpose kernel to output_filters", "data_format == 'channels_first': output = repeat_elements(x, height_factor, axis=2) output =", "None: raise ValueError('CNTK Backend: the input of static rnn '", "dims = len(shape) global uses_learning_phase uses_learning_phase = False if dims", "range(1, ndim(x)): if axis in reduction_axes: target_shape.append(1) if ndim(gamma) >", "1), while keras expect (batch, ) return C.reshape(result, ()) else:", "else _ for _ in axis] if shape.count(C.InferredDimension) > 1", "= _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_mean') return _remove_dims(output,", "kernel, x, strides, auto_padding=[ False, padding, padding, padding]) return _postprocess_conv3d_output(x,", "it is not 0 or 1, we will go with", "= C.relu(x) if max_value is not None: x = C.clip(x,", "1)) # shape: batch, filters, output_length, input_length * kernel_size output", "return x def dot(x, y): if len(x.shape) > 2 or", "[ C.output_variable( self.target_shape, self.inputs[0].dtype, [batch_axis])] def forward(self, arguments, device=None, outputs_to_retain=None):", "phase. To make it work, call # \"forward\" method to", "C.element_select(training, x, alt) result._uses_learning_phase = uses_learning_phase return result def in_test_phase(x,", "= C.assign(update[0], update[1]) else: u = update if len(u.arguments) ==", "padding, padding], output_shape=output_shape) return _postprocess_conv3d_output(x, data_format) def pool2d(x, pool_size, strides=(1,", "shape) def mean(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output", "if max_value is None: max_value = np.inf if min_value is", "for argument in self.unrelated_updates.arguments: if argument in feed_dict: input_dict[argument] =", "input.shape def infer_outputs(self): return [ C.output_variable( self.target_shape, self.inputs[0].dtype, [])] def", "run RNN.' 
% dims) if _get_dynamic_axis_num(inputs) == 0 or unroll:", "auto_padding=[False]) return _postprocess_conv2d_output(x, data_format) def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid',", "C.to_sequence_like(mask, rnn_inputs) states = tuple(initial) with C.default_options(axis_offset=1): def _recurrence(x, states,", "C.splice(*tmp, axis=i - num_dynamic_axis) i += 1 return x def", "x if training == 1 or training is True else", "% g) if len(u_list) > 0: learner = C.cntk_py.universal_learner(p_list, u_list,", "> 2 or len(y.shape) > 2: y_shape = int_shape(y) if", "for g in grads: if g in grad_parameter_dict: p_list.append(grad_parameter_dict[g]) u_list.append(g)", "that randomness is conditioned by the Numpy RNG seed =", "begin_index, end_index, strides) def _reshape_batch(x, shape): # there is a", "def forward(self, argument, device=None, outputs_to_retain=None): if self.when(argument): self.execute(argument) return None,", "+= 1 else: cntk_axis.append(i - dynamic_axis_index) if dynamic_axis_index < nones:", "== 2: mask = expand_dims(mask) mask = C.to_sequence_like(mask, rnn_inputs) states", "nones = _get_dynamic_axis_num(x) index = axis if axis >= 0", "shape=postfix_shape), axis=axis) return x def spatial_2d_padding(x, padding=((1, 1), (1, 1)),", "output_length, feature_dim, filters = kernel_shape xs = [] for i", "tuple([1 for _ in range(len(shape) - len(n))]) + n if", "' 'related to any parameters in the model. ' 'Please", "C.convolution(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False]) else: if dilation_rate[0] !=", "symbolic tensor instance.') return hasattr(x, '_keras_history') def is_tensor(x): return isinstance(x,", "= [len(y_shape) - 2] permutation += list(range(len(y_shape) - 2)) permutation", "( outputs[0], outputs[1]) if len(outputs) > 1 else ( outputs[0],", "has ' '%d cntk dynamic axis, this is not expected,", "shape = (1, 1, 1, bias.shape[0]) else: shape = bias.shape", "C.assign(variable, variable * momentum + value * (1. - momentum))", "ndim(then_expression) if ndim_cond > ndim_expr: raise ValueError('Rank of condition should", "keepdims=False): return log(sum(exp(x), axis=axis, keepdims=keepdims)) def var(x, axis=None, keepdims=False): m", "tmp_shape) def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None): if data_format is", "1: shape = (bias.shape[0], 1, 1) else: shape = (bias.shape[2],)", "new_shape]) result = C.reshape(x, new_shape) if index < nones: result._keras_shape", "bug in cntk gather op which may cause crash. #", "= sum(output, axis=3) # shape: batch, filters, row, col output", "else: result.append(eval(x)) return result def set_value(x, value): if (isinstance(x, C.variables.Parameter)", "axis=-1): axis = [axis] axis = _normalize_axis(axis, x) output =", "1) + depthwise_kernel.shape[2:]) pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format) padding = _preprocess_border_mode(padding)", "or shape.count(C.FreeDimension) > 1: result = x for index in", "return C.sqrt(var(x, axis=axis, keepdims=keepdims)) def expand_dims(x, axis=-1): shape = list(int_shape(x))", "can not take variable length inputs. 
Please ' 'pass inputs", "np.float32 and value.dtype != np.float64): value = value.astype(np.float32) if tensor", "= tuple([None for _ in range(ndim)]) dynamic_dimension = C.FreeDimension if", "ndim(condition) ndim_expr = ndim(then_expression) if ndim_cond > ndim_expr: raise ValueError('Rank", "in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) stride", "= learning_phase() uses_learning_phase = True else: uses_learning_phase = False #", "depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format", "_padding(x, padding[1], 3) else: if num_dynamic_axis > 0: assert len(base_shape)", "tuple(states) + tuple(constants)) if getattr(output, '_uses_learning_phase', False): uses_learning_phase = True", "dtype is None: dtype = floatx() if shape is None:", "def ones(shape, dtype=None, name=None): if dtype is None: dtype =", "return output else: raise ValueError('CNTK Backend: Invalid data_format:', data_format) def", "str(data_format)) padding = _preprocess_border_mode(padding) strides = strides pool_size = pool_size", "C.parameter( shape, init=C.initializer.uniform( scale, seed=seed), dtype=dtype, name=name) return variable(value=p.value +", "reshape did not take place.') return x return _reshape_batch(x, shape)", "name=None, dynamic_axis_num=1): if dtype is None: dtype = floatx() if", "num_dynamic_axis = _get_dynamic_axis_num(x) if isinstance(pattern, list): current_layout = [i for", "' '%d dimensions are needed.' % (len(cntk_shape, dynamic_axis_num))) if name", "1, feature_dim))) else: xs.append(reshape(inputs[:, slice_row, slice_col, :], (-1, 1, feature_dim)))", "i + 1) mask_slice = squeeze(mask_slice, 1) if len(outputs) ==", "NotImplementedError('CNTK Backend: `go_backwards` is not supported with ' 'variable-length sequences.", "[] for constant in constants: if isinstance(constant, list): new_c =", "axis back output_slice = expand_dims(outputs[i], 1) final_output = C.splice(final_output, output_slice,", "target, axis=-1)), axis=C.Axis.all_axes()) def argmax(x, axis=-1): axis = [axis] axis", "# cntk's batch axis is not in shape, # so", "if self.metrics_func is not None: input_dict = {} for argument", "_preprocess_conv3d_input(x, data_format) if pool_mode == 'max': x = C.pooling( x,", "1) kernel = C.swapaxes(kernel, 0, 2) padding = _preprocess_border_mode(padding) strides", "int_shape(y) if isinstance(axes, int): axes = (axes, axes) if axes", "= output_shape[1:] # in keras2, need handle output shape in", "axis, keepdims) def prod(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x)", "raise ValueError('Invalid border mode: ' + str(padding)) return padding def", "x return _reshape_batch(x, shape) else: # no collapse, then first", "in cntk, need to remove those dummy axis. if ndim(mean)", "'channels_last': x = C.transpose(x, (1, 2, 0)) return x def", "evaluated in training phase. To make it work, call #", "print(arg), name=''): self.when = when self.execute = execute super(LambdaFunc, self).__init__([arg],", "If _LEARNING_PHASE is not 0 or 1, return dynamic learning", "x, tuple(past_values) + tuple(rnn_constants)) if getattr(new_output, '_uses_learning_phase', False): global uses_learning_phase", "`%s` contains non-specified dimension, ' 'which is not supported. 
Please", "of `None`.') # how to apply mean and stddev return", "o in self.trainer_output: updated.append(outputs[o]) if self.metrics_func is not None: input_dict", "NAME_SCOPE_STACK.pop() def get_uid(prefix=''): _UID_PREFIXES[prefix] += 1 return _UID_PREFIXES[prefix] def learning_phase():", "the shape if num_dynamic_axis >= len(shape): i = 0 while", "shape(mean)[0] == 1: mean = _reshape_dummy_dim(mean, [0]) if ndim(var) ==", "return x def conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): if", "dimension is not supported, at least ' '%d dimensions are", "isinstance(a, C.Axis) \\ and a != C.Axis.default_batch_axis() \\ and hasattr(C.sequence,", "from grad placeholder to parameter grad_parameter_dict = {} NAME_SCOPE_STACK =", "return _static_rnn( step_function, inputs, initial_states, go_backwards, mask, constants, unroll, input_length)", "_LEARNING_PHASE_PLACEHOLDER = C.constant(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase') # static learning phase", "in axes]): raise ValueError('Multiple target dimensions are not supported. '", "C.to_sequence(rnn_inputs) rnn_constants = [] for constant in constants: if isinstance(constant,", "gather(reference, indices): # There is a bug in cntk gather", "'pass inputs that have a static shape.' % (str(tensor.shape), str(value.shape)))", "list(padding[0]), list(padding[1])]) else: x = _padding(x, padding[0], 2) x =", "data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' +", "y) def minimum(x, y): return C.element_min(x, y) def sin(x): return", "filters, output_length, input_length * kernel_size output = x_aggregate * weight", "currently don't support cond op, so here we use #", "axis=axis) shift = C.stop_gradient(shift) shifted_mean = C.minus(x, shift) for axis", "final_output, final_states = _recurrence(rnn_inputs, states, mask) last_output = C.sequence.last(final_output) last_states", "min_value = -np.inf return C.clip(x, min_value, max_value) def binary_crossentropy(target, output,", "= _padding(x, padding[1], 1) x = _padding(x, padding[2], 2) else:", "under this mode, that's why # we need this check.", "x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), [0, 0]]) else: x =", "raise ValueError('Unknown data_format ' + str(data_format)) stride = strides[0] kernel_shape", "_get_dynamic_axis_num(x) if index < 0 or index > 1: raise", "dilation_rate[0] == dilation_rate[1] assert strides == (1, 1), 'Invalid strides", "'reduce_max') return _remove_dims(output, axis, keepdims) def min(x, axis=None, keepdims=False): axis", "mask = C.to_sequence_like(mask, rnn_inputs) states = tuple(initial) with C.default_options(axis_offset=1): def", "_ is None: raise ValueError('CNTK Backend: randomness op with '", "0, 1) kernel = C.swapaxes(kernel, 0, 2) padding = _preprocess_border_mode(padding)", "kernel, kernel_size, strides, output_shape, data_format=None): if data_format is None: data_format", "None: constants = [] if mask is not None: mask_shape", "as # workaround if isinstance(value, C.cntk_py.Function): value = eval(value) shape", "def tile(x, n): if isinstance(n, int): n = (n,) elif", "tensors[0]) return C.splice(*tensors, axis=axis[0]) def flatten(x): return reshape(x, (-1,)) def", "# Returns Boolean. 
\"\"\" return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder def", "_LEARNING_PHASE_PLACEHOLDER global _LEARNING_PHASE assert isinstance(inputs, (list, tuple)) feed_dict = {}", "shape, init=C.initializer.truncated_normal( stddev, seed=seed), dtype=dtype) def dtype(x): return _convert_dtype_string(x.dtype) def", "0 else _ + len(shape)) if len(_axis) == 0: return", "proper broadcasting. for axis in _axes: shift = C.reduce_mean(shift, axis=axis)", "alpha == 1: return res else: return C.element_select(C.greater(x, 0), res,", "n_s = [] for o, p in zip(new_states, place_holders): n_s.append(o.replace_placeholders({p:", "1 while i < len(cntk_axis): cntk_axis[i] -= nones i +=", "axis=-1): if len(tensors) == 0: return None axis = [axis]", "result = x for index in sorted(_axis, reverse=True): result =", "def _preprocess_border_mode(padding): if padding == 'same': padding = True elif", "Please specify a ' 'static length for your sequences.') rnn_inputs", "= _moments(x, _normalize_axis(reduction_axes, x)) if sorted(reduction_axes) == list(range(ndim(x)))[:-1]: normalized =", "num_time_step) f_stats = [] for l_s, i_s in zip(last_states, initial_states):", "return _remove_dims(output, axis, keepdims) def logsumexp(x, axis=None, keepdims=False): return log(sum(exp(x),", "2) padding = _preprocess_border_mode(padding) strides = [strides] x = C.convolution(", "{0, 1} else _LEARNING_PHASE_PLACEHOLDER def set_learning_phase(value): global _LEARNING_PHASE if value", "defaultdict from contextlib import contextmanager import warnings C.set_global_option('align_axis', 1) b_any", "= C.ops.argmin(x, axis=axis[0]) return _reshape_dummy_dim(output, axis) def square(x): return C.square(x)", "i_s in zip(last_states, initial_states): if _get_dynamic_axis_num(i_s) == 0 and _get_dynamic_axis_num(l_s)", "tuple(postfix_shape) x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis) return x def", "_ in range(num_dynamic_axis - i)]) + shape new_shape = list(shape)", "and _get_dynamic_axis_num(l_s) == 1: if hasattr(C, 'unpack_batch'): f_stats.append(C.unpack_batch(l_s)) else: f_stats.append(C.user_function(ConvertToStatic(l_s,", "gradient as a constant placeholder, here use this global #", "dtype = floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.ones(shape, ctype), dtype=dtype,", "if data_format == 'channels_last': x = C.transpose(x, (1, 2, 0))", "= list(base_shape) prefix_shape[axis] = pattern[0] prefix_shape = tuple(prefix_shape) x =", "gather op which may cause crash. # We have made", "= _padding(x, padding, 1) return x def _padding(x, pattern, axis):", "axis=None): axis = [axis] axis = _normalize_axis(axis, x) norm =", "if not self._is_input_shape_compatible(value, tensor): raise ValueError('CNTK backend: The placeholder has", "only support calculate on float, do auto cast here if", "' 'expected 1 or %d dimensions' % (bias_dims, dims)) if", "name) def zeros_like(x, dtype=None, name=None): return x * 0 def", "np.asarray(value) else: # in current version cntk can't support input", "won't be applied during \"eval\" in cntk. 
# They only", "padding[2], 4) else: if num_dynamic_axis > 0: assert len(base_shape) ==", "int_shape(x) num_dynamic_axis = _get_dynamic_axis_num(x) # Padding the axis if len(n)", "padding, padding], groups=x.shape[0]) else: if dilation_rate[0] != dilation_rate[1]: raise ValueError('CNTK", "v = C.parameter(shape=shape, init=value, dtype=dtype, name=_prepare_name(name, 'variable')) v._keras_shape = v.shape", "for l_s, i_s in zip(last_states, initial_states): if _get_dynamic_axis_num(i_s) == 0", "output_col)) if data_format == 'channels_last': # shape: batch, row, col,", "= constraint return v def bias_add(x, bias, data_format=None): if data_format", "'_keras_history') def is_tensor(x): return isinstance(x, (C.variables.Constant, C.variables.Variable, C.variables.Parameter, C.ops.functions.Function)) def", "- num_dynamic_axis) i += 1 return x def _normalize_axis(axis, x):", "return C.reshape(x, new_shape) def permute_dimensions(x, pattern): dims = len(int_shape(x)) num_dynamic_axis", "dims > 0 and x.shape[0] == C.InferredDimension: dims -= 1", "# try to cast to float to run the model", "C.ops.slice(inputs, time_axis, i, i + 1) # remove dummy dimension", "# Arguments value: Numpy array, initial value of the tensor.", "unrelated_updates.extend(u_ops) if len(unrelated_updates) > 0: self.unrelated_updates = C.combine([_.output for _", ":, slice_row, slice_col], (-1, 1, feature_dim))) else: xs.append(reshape(inputs[:, slice_row, slice_col,", "'channels_first': output = repeat_elements(x, depth_factor, axis=2) output = repeat_elements(output, height_factor,", "- 1) else: return C.times(x, y) def batch_dot(x, y, axes=None):", "x in xs: if (isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)): result.append(x.value)", "_recurrence(x, states, m): # create place holder place_holders = [C.placeholder(dynamic_axes=x.dynamic_axes)", "_preprocess_conv3d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = strides + (strides[0],)", "+ shape return shape def ndim(x): shape = int_shape(x) return", "has_seq_axis(mask): if go_backwards: mask = reverse(mask, 1) if len(int_shape(mask)) ==", "= ones_like(x) else: gamma = ones_like(beta) if beta is None:", "0: assert len(base_shape) == 3 if hasattr(C, 'pad'): x =", "variable(value=p.value + low + scale) def random_normal_variable( shape, mean, scale,", "root_gradients): return C.cntk_py.Value(root_gradients.data()) class LambdaFunc(C.ops.functions.UserFunction): def __init__(self, arg, when=lambda arg:", "dtype=None, seed=None): # use numpy workaround now if seed is", "' 'Please provide fixed dimension ' 'instead of `None`.') #", "else: if num_dynamic_axis > 0: assert len(base_shape) == 3 if", "+ str(pool_mode)) return _postprocess_conv3d_output(x, data_format) def relu(x, alpha=0., max_value=None): if", "slice_row = slice(i * stride_row, i * stride_row + kernel_size[0])", "and stddev return random_normal_variable(shape=shape, mean=mean, scale=1.0, seed=seed) def truncated_normal(shape, mean=0.0,", "how to repeat it in cntk now # return the", "+ i]) return C.element_select(condition, then_expression, else_expression) def elu(x, alpha=1.): res", "_get_cntk_version() >= 2.2: return C.ops.gather(reference, indices) else: num_classes = reference.shape[0]", "__future__ import division from __future__ import print_function import cntk as", "' 'Please provide fixed dimension ' 'instead of `None`.') size", "and p != C.InferredDimension and p != C.FreeDimension: return False", "backward(self, state, root_gradients): return 
[Extraction residue: this span is a bag of overlapping n-gram windows lifted from Keras' CNTK backend (keras/backend/cntk_backend.py), shuffled out of order; the windows do not reassemble into contiguous source. The recoverable API surface covers: tensor creation and session state (variable, constant, placeholder, learning_phase, set_learning_phase, clear_session); random ops (random_uniform, random_uniform_variable, random_normal, random_normal_variable, random_binomial, truncated_normal); reductions and elementwise math (mean, sum, prod, var, std, any, all, argmax, argmin, square, abs, sqrt, pow, clip, sin, cos, maximum, minimum); shape manipulation (reshape, expand_dims, squeeze, tile, repeat, repeat_elements, permute_dimensions, flatten, batch_flatten, concatenate, one_hot, gather); convolution, pooling, and padding (conv1d/2d/3d, conv3d_transpose, separable_conv1d/2d, local_conv1d, pool2d/3d, temporal_padding, spatial_2d_padding, spatial_3d_padding, bias_add, resize_images, resize_volumes); normalization (_moments, batch_normalization, normalize_batch_in_training, l2_normalize); activations and losses (relu, elu, softmax, softplus, softsign, tanh, hard_sigmoid, categorical_crossentropy, sparse_categorical_crossentropy, binary_crossentropy, in_top_k, dropout); recurrence and phase switching (rnn with both unrolled and dynamic-axis paths, switch, in_train_phase, in_test_phase); the Function trainer wrapper built on C.trainer.Trainer with a universal learner driven through gradient placeholders; and the UserFunction helpers ReshapeBatch, ConvertToBatch, ConvertToStatic, and LambdaFunc (used by print_tensor).]
if ndim(mean) == ndim(x)", "def less_equal(x, y): return C.less_equal(x, y) def maximum(x, y): return", "i, i + 1) mask_slice = squeeze(mask_slice, 1) if len(outputs)", "range(len(x.shape)): if shape[i + num_dynamic] is None: non_dyn_shape.append(x.shape[i]) else: non_dyn_shape.append(shape[i", "2) x = _padding(x, padding[1], 3) x = _padding(x, padding[2],", "if dtype == 'float32': return np.float32 elif dtype == 'float64':", "axis. if ndim(mean) == ndim(x) and shape(mean)[0] == 1: mean", "'shape'): num_dynamic = get_num_dynamic_axis(placeholder) input_shape = input.shape[num_dynamic:] placeholder_shape = placeholder.shape", "for update in updates: if isinstance(update, tuple): if len(update) !=", "ndim(gamma) > axis: gamma = C.reduce_mean(gamma, axis - 1) beta", "dot(x, y): if len(x.shape) > 2 or len(y.shape) > 2:", "= C.convolution(depthwise_kernel, x, strides=strides, auto_padding=[False, padding, padding], groups=x.shape[0]) x =", "'pad'): x = C.pad(x, pattern=[padding, (0, 0)]) else: x =", "int), ' + 'Provided: ' + str(axes)) if len(x_shape) ==", "len(updates) > 0: assert len(outputs) > 0 self.loss = outputs[0]", "i > 0: y = C.swapaxes(y, i, i - 1)", "support gradients as symbolic op, # to hook up with", "- epsilon()) return -sum(target * C.log(output), axis=-1) def sparse_categorical_crossentropy(target, output,", "+ n if len(n) != len(shape): raise NotImplementedError i =", "weight = permute_dimensions(kernel, (2, 0, 1)) # shape: batch, filters,", "slices.append(tmp) i += 1 return C.splice(*slices, axis=axis) def repeat(x, n):", "momentum): return C.assign(variable, variable * momentum + value * (1.", "= True elif padding == 'valid': padding = False else:", "auto_padding=[ False, padding, padding]) return _postprocess_conv2d_output(x, data_format) def separable_conv1d(x, depthwise_kernel,", "output_shape, data_format=None): if data_format is None: data_format = image_data_format() if", "'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x,", "= value.value # we don't support init parameter with symbolic", "= C.constant(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase') # static learning phase flag,", "ndim=None, dtype=None, sparse=False, name=None, dynamic_axis_num=1): if dtype is None: dtype", "variable(value=np.zeros(shape, ctype), dtype=dtype, name=name) def ones(shape, dtype=None, name=None): if dtype", "= list(axis) if not isinstance(axis, list): axis = [axis] shape", "var = _reshape_dummy_dim(var, [0]) if gamma is None: gamma =", "1 last_output._uses_learning_phase = uses_learning_phase return last_output, final_output, states def rnn(step_function,", "[len(x_shape) - 1, len(y_shape) - 2] if b_any([isinstance(a, (list, tuple))", "with keras shape: `%s` has ' '%d cntk dynamic axis,", "= x # Compute true mean while keeping the dims", "cols)`. 
kernel = C.transpose(kernel, (3, 2, 0, 1)) return kernel", "grads.append(g) grad_parameter_dict[g] = v return grads def equal(x, y): return", "= _convert_string_dtype(dtype) if name is None: name = '' scale", "in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x", "elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter): return x.value else: raise", "len(update) != 2: raise NotImplementedError else: u = C.assign(update[0], update[1])", "def is_sparse(tensor): return tensor.is_sparse def int_shape(x): if hasattr(x, '_keras_shape'): return", "_ binomial = np.random.binomial(1, p, size).astype(dtype).reshape(shape) return variable(value=binomial, dtype=dtype) def", "= output_shape[2] output_shape = tuple(shape) x = C.convolution_transpose( kernel, x,", "depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is", "< nones: result._keras_shape = shape return result def squeeze(x, axis):", "False, padding, padding], output_shape=output_shape) return _postprocess_conv2d_output(x, data_format) def identity(x, name=None):", "input, batch_size, name='convert_to_static'): super(ConvertToStatic, self).__init__([input], as_numpy=False, name=name) self.target_shape = (batch_size,)", "value %s is not supported, ' 'expected 0 or 1.'", "= C.convolution(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False]) else: if dilation_rate[0]", "in cntk_axes] return C.slice(x, cntk_axes, begin_index, end_index, strides) def _reshape_batch(x,", "else _LEARNING_PHASE_PLACEHOLDER def set_learning_phase(value): global _LEARNING_PHASE if value not in", "C.combine(self.metrics_outputs) # cntk only could handle loss and 1 metric", "(0, 2, 1)) def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None):", "n = tuple(n) shape = int_shape(x) num_dynamic_axis = _get_dynamic_axis_num(x) #", "= [C.InferredDimension if _ == C.FreeDimension else _ for _", "i < len(cntk_axis): cntk_axis[i] -= nones i += 1 if", "prefix_shape[axis] = pattern[0] prefix_shape = tuple(prefix_shape) x = C.splice(C.constant(value=0, shape=prefix_shape),", "return x def max(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x)", "is None: # behaves like tf.batch_matmul as default axes =", "# Some ops (like dropout) won't be applied during \"eval\"", "strides[0] kernel_shape = int_shape(kernel) output_length, feature_dim, filters = kernel_shape xs", "' 'dynamic rnn with sequence axis.' % shape) if constants", "argument %s ' 'is not found in inputs. Please double", "return C.ops.gather(reference, indices) else: num_classes = reference.shape[0] one_hot_matrix = C.ops.one_hot(indices,", "u_ops = [] unrelated_updates = [] for update in updates:", "len(padding[2]) == 2 if data_format is None: data_format = image_data_format()", "= _padding(x, padding[0], 0) x = _padding(x, padding[1], 1) x", "scale, dtype=None, name=None, seed=None): if dtype is None: dtype =", "cond op. if callable(x) and isinstance(x, C.cntk_py.Function) is False: x", "for recurrent layer # if n is inferred dimension, #", "i < len(outputs): # add the time_step axis back output_slice", "op, # to hook up with keras model # we", "go_backwards, mask, constants, unroll, input_length) if constants is None: constants", "bool): result = x if training == 1 or training", "placeholder, here use this global # map to keep the", "import division from __future__ import print_function import cntk as C", "level < 0. 
or level >= 1: raise ValueError('CNTK Backend:", "elif dims == 2: if data_format == 'channels_first': if bias_dims", "last_output, final_output, f_stats def has_seq_axis(x): return hasattr(x, 'dynamic_axes') and len(x.dynamic_axes)", "_get_dynamic_axis_num(x) index = axis if axis >= 0 else len(shape)", "else: x = _padding(x, padding, 1) return x def _padding(x,", "in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) stride_row,", "= list(int_shape(x)) num_dynamic = _get_dynamic_axis_num(x) non_dyn_shape = [] for i", "and len(y_shape) == 2: if axes[0] == axes[1]: result =", "== 'channels_last': x = C.swapaxes(x, 0, 1) return x def", "if isinstance(axis, list): for a in axis: if isinstance(a, C.Axis)", "need manual eval elif len(outputs) > 2: self.metrics_outputs = [f.output", "if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError(", "dev = C.device.use_default_device() if dev.type() == 0: warnings.warn( 'CNTK backend", "return C.user_function( LambdaFunc(x, when=lambda x: True, execute=lambda x: print(message))) def", "_LEARNING_PHASE_PLACEHOLDER def set_learning_phase(value): global _LEARNING_PHASE if value not in {0,", "def one_hot(indices, num_classes): return C.one_hot(indices, num_classes) def get_value(x): if isinstance(", "the same x to take cntk broadcast feature # to", "= (bias.shape[2],) + bias.shape[:2] elif data_format == 'channels_last': if bias_dims", "% value) _LEARNING_PHASE = value def clear_session(): \"\"\"Reset learning phase", "= C.reshape(variant, target_shape) broadcast_gamma = C.reshape(gamma, target_shape) broadcast_beta = C.reshape(beta,", "= ndim(condition) ndim_expr = ndim(then_expression) if ndim_cond > ndim_expr: raise", "0 while i < shape[axis]: tmp = C.ops.slice(x, axis, i,", "shape=(), begin_axis=index, end_axis=index + 1) return result else: for index", "NAME_SCOPE_STACK NAME_SCOPE_STACK.append(name) yield NAME_SCOPE_STACK.pop() def get_uid(prefix=''): _UID_PREFIXES[prefix] += 1 return", "we don't support init parameter with symbolic op, so eval", "C.tanh(x) def _static_rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None):", "in range(1, ndim(x)): if axis in reduction_axes: target_shape.append(1) if ndim(gamma)", "x = C.pad(x, pattern=[(0, 0), padding, (0, 0)]) else: x", ">= len(shape): i = 0 while i < len(shape): if", "\\ and a != C.Axis.default_batch_axis() \\ and hasattr(C.sequence, reduce_fun_name): x", "self.execute(argument) return None, argument def backward(self, state, root_gradients): return root_gradients", "result else: for index in sorted(_axis, reverse=True): del shape[index] shape", "return v def bias_add(x, bias, data_format=None): if data_format is None:", "batch_axis = C.Axis.default_batch_axis() return [ C.output_variable( self.target_shape, self.inputs[0].dtype, [batch_axis])] def", "i in range(output_row): for j in range(output_col): slice_row = slice(i", "for f in outputs[2:]] self.metrics_func = C.combine(self.metrics_outputs) else: self.metrics_func =", "x # Compute true mean while keeping the dims for", "self.target_shape) return None, C.cntk_py.Value(result) def backward(self, state, root_gradients): grad_array_view =", "C.transpose(x, (1, 2, 3, 0)) return x def _get_dynamic_axis_num(x): if", "2.2: return C.ops.gather(reference, indices) else: num_classes = reference.shape[0] one_hot_matrix =", "None and a < 0: _axis[i] = (a % ndim)", "(with Keras metadata included). 
\"\"\" if dtype is None: dtype", "op with ' 'dynamic shape is not supported now. '", "new_x): return C.assign(x, new_x) def moving_average_update(variable, value, momentum): return C.assign(variable,", "isinstance(variables, (list, tuple)): return map(C.stop_gradient, variables) else: return C.stop_gradient(variables) def", "!= dilation_rate[1]: raise ValueError('CNTK Backend: non-square dilation_rate is ' 'not", "not fully optimized,' 'please run with GPU to get better", "_LEARNING_PHASE == 1)): _, output_values = self.metrics_func.forward( input_dict, self.metrics_func.outputs, (self.metrics_func.outputs[0],),", "is None: seed = np.random.randint(1, 10e6) if dtype is None:", "zip(states, place_holders): past_values.append(C.sequence.past_value(p, s)) new_output, new_states = step_function( x, tuple(past_values)", "kernel = C.transpose(kernel, (4, 3, 0, 1, 2)) return kernel", "Backend: randomness op with ' 'dynamic shape is not supported", "_postprocess_conv2d_output(x, data_format) def conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None,", "1) rnn_inputs = C.to_sequence(rnn_inputs) rnn_constants = [] for constant in", "pattern=[(0, 0), padding, (0, 0)]) else: x = _padding(x, padding,", "is a workaround for recurrent layer # if n is", "fixed ' 'dimension to enable padding.' % base_shape) if pattern[0]", "if b_any(_ == C.InferredDimension for _ in x.shape) or b_any(", "' 'must be in interval [0, 1].' % level) return", "dtype = _convert_string_dtype(dtype) if name is None: name = ''", "list(padding[1]), [0, 0]]) else: x = _padding(x, padding[0], 1) x", "super(ConvertToStatic, self).__init__([input], as_numpy=False, name=name) self.target_shape = (batch_size,) + input.shape def", "beta / gamma may be processed by broadcast # so", "!= dtype and len(shape) > 0: value = value.astype(dtype) #", "in cntk gather op which may cause crash. # We", "getattr(C, reduce_fun_name)(x, axis) return x def _reshape_sequence(x, time_step): tmp_shape =", "axis in range(1, ndim(x)): if axis in reduction_axes: target_shape.append(1) if", "try to cast to float to run the model return", "C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1]), list(padding[2])]) else: x", "[] unrelated_updates = [] for update in updates: if isinstance(update,", "C.reduce_mean(beta, axis - 1) else: target_shape.append(x_shape[axis]) broadcast_mean = C.reshape(mean, target_shape)", "filters output = permute_dimensions(output, (0, 2, 3, 1)) return output", "isinstance(value, C.cntk_py.Function): value = eval(value) shape = value.shape if hasattr(value,", "= return_states outputs.append(output) states = new_states[:len(states)] i += 1 i", "learning phase flag, if it is not 0 or 1,", "shape = bias.shape return x + reshape(bias, shape) def eval(x):", "tuple(axes) if shift is None: shift = x # Compute", "set_learning_phase(value): global _LEARNING_PHASE if value not in {0, 1}: raise", "permute on dynamic axis, ' 'which is not supported. 
Please", "strides=strides, auto_padding=[False, padding, padding], groups=x.shape[0]) x = C.convolution(pointwise_kernel, x, strides=(1,", "shift = C.stop_gradient(shift) shifted_mean = C.minus(x, shift) for axis in", "len(cntk_shape): raise ValueError('CNTK backend: creating placeholder with ' '%d dimension", "epsilon=1e-3): if gamma is None: if beta is None: gamma", "x = _padding(x, padding[2], 2) else: assert len(base_shape) == 5", "'please run with GPU to get better performance.') # A", "True break if has_seq: nones = _get_dynamic_axis_num(x) x = expand_dims(x,", "now # return the same x to take cntk broadcast", "the input of static rnn ' 'has shape `%s`, the", "dtype = floatx() if not shape: if ndim: shape =", "shape] cntk_shape = tuple(cntk_shape) if dynamic_axis_num > len(cntk_shape): raise ValueError('CNTK", "num_static_element) result = arguments.data().as_shape((num_batch,) + self.target_shape) return None, C.cntk_py.Value(result) def", "return variable(value=np.ones(shape, ctype), dtype=dtype, name=name) def eye(size, dtype=None, name=None): if", "in _axes: shifted_mean = C.reduce_mean(shifted_mean, axis=axis) variance_mean = C.square(C.minus(x, shift))", "outputs[0], ) self.trainer = C.trainer.Trainer( outputs[0], criterion, [learner]) self.trainer_output =", "2: y = expand_dims(y) normalized_axis = [] normalized_axis.append(_normalize_axis(axes[0], x)[0]) normalized_axis.append(_normalize_axis(axes[1],", "in ' '`train_function`.' % argument.name) result = self.trainer.train_minibatch( input_dict, self.trainer_output)", "division from __future__ import print_function import cntk as C import", "[batch_axis])] def forward(self, arguments, device=None, outputs_to_retain=None): return None, C.cntk_py.Value(arguments.data()) def", "' 'fixed dimension instead of `None`.') return np.prod(int_shape(x)) def cast(x,", "= ones_like(beta) if beta is None: if gamma is None:", "1)) def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None): if data_format", "data_format == 'channels_last': output = repeat_elements(x, depth_factor, axis=1) output =", "shape = list(int_shape(x)) _axis = [] for _ in axis:", "return C.element_min(x, y) def sin(x): return C.sin(x) def cos(x): return", "cntk_shape = cntk_shape[dynamic_axis_num:] x = C.input( shape=cntk_shape, dtype=_convert_string_dtype(dtype), is_sparse=sparse, name=name)", "self.trainer = C.trainer.Trainer( outputs[0], criterion, [learner]) self.trainer_output = tuple([f.output for", "== 2 assert len(padding[0]) == 2 assert len(padding[1]) == 2", "else: raise ValueError('CNTK Backend: Invalid data_format:', data_format) def resize_volumes(x, depth_factor,", "# cntk output_shape does not include batch axis output_shape =", "= (n,) elif isinstance(n, list): n = tuple(n) shape =", "= C.reshape(x, (-1,)) x._keras_shape = (None, dim) return x def", "the . except the first one. 
if len(version) > 2", "= _preprocess_conv2d_kernel(pointwise_kernel, data_format) padding = _preprocess_border_mode(padding) if dilation_rate == (1,", "axes[0] == axes[1]: result = sum(x * y, axis=axes[0], keepdims=True)", "in range(len(shape) - len(n))]) + n if len(n) != len(shape):", "tuple(prefix_shape) x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis) base_shape = x.shape", "self).__init__([input], as_numpy=False, name=name) self.target_shape = (batch_size,) + input.shape def infer_outputs(self):", "!= p and p != C.InferredDimension and p != C.FreeDimension:", "repeat_elements(output, width_factor, axis=3) return output else: raise ValueError('CNTK Backend: Invalid", "hasattr(C, 'unpack_batch') and _get_cntk_version() >= 2.2: const_a = C.unpack_batch(x) const_a", "y): return C.not_equal(x, y) def greater(x, y): return C.greater(x, y)", "_preprocess_border_mode(padding) if dilation_rate == (1, 1): strides = (1,) +", "else: dtype = _convert_string_dtype(dtype) size = 1 for _ in", "`(rows, cols, input_depth, depth)`, # independently of `data_format`. # CNTK", "name=None, seed=None): if dtype is None: dtype = floatx() if", "height_factor, axis=1) output = repeat_elements(output, width_factor, axis=2) return output else:", "1 while i >= 0: current = C.ops.slice(inputs, time_axis, i,", "base_shape = x.shape if pattern[1] > 0: postfix_shape = list(base_shape)", "if name is None or name == '': return prefix", "placeholder.shape for i, p in zip(input_shape, placeholder_shape): if i !=", "0). # LEARNING_PHASE_PLACEHOLDER is the placeholder for dynamic learning phase", "# in keras2, need handle output shape in different format", "def clear_session(): \"\"\"Reset learning phase flag for cntk backend. \"\"\"", "epsilon) return normalized, mean, variant def _moments(x, axes=None, shift=None, keep_dims=False):", "learning_phase(): # If _LEARNING_PHASE is not 0 or 1, return", "keras model, # we will create gradient as a constant", "tensor.is_sparse def int_shape(x): if hasattr(x, '_keras_shape'): return x._keras_shape shape =", "__future__ import print_function import cntk as C import numpy as", "support shape like (1, batch). 
so using the workaround #", "inputs, initial_states, go_backwards, mask, constants, unroll, input_length) if constants is", "no_mask_output=True) if num_time_step is not None and num_time_step is not", "mean = _reshape_dummy_dim(mean, [0]) if ndim(var) == ndim(x) and shape(var)[0]", "dtype is None: dtype = floatx() if not shape: if", "= self.metrics_func.forward( input_dict, self.metrics_func.outputs, (self.metrics_func.outputs[0],), as_numpy=False) else: output_values = self.metrics_func.eval(input_dict,", "ones_like(var) elif ndim(gamma) == ndim(x) and shape(gamma)[0] == 1: gamma", "C.output_variable( self.inputs[0].shape[1:], self.inputs[0].dtype, [batch_axis])] def forward(self, arguments, device=None, outputs_to_retain=None): return", "shape[axis]: tmp = C.ops.slice(x, axis, i, i + 1) for", "reshape(bias, shape) def eval(x): if isinstance(x, C.cntk_py.Function): return x.eval() elif", "result = C.reshape(result, shape=(), begin_axis=index, end_axis=index + 1) return result", "= int_shape(x) return len(shape) def _prepare_name(name, default): prefix = '_'.join(NAME_SCOPE_STACK)", "- 1) def _remove_dims(x, axis, keepdims=False): if keepdims is False", "', ndim(then_expression)' '=' + str(ndim_expr)) elif ndim_cond < ndim_expr: shape_expr", "tmp = [x] * rep x = C.splice(*tmp, axis=i -", "def backward(self, state, root_gradients): return C.cntk_py.Value(root_gradients.data()) class LambdaFunc(C.ops.functions.UserFunction): def __init__(self,", "broadcasting target_shape = [] x_shape = int_shape(x) # skip the", "return C.reduce_sum(all_matrix) else: return all_matrix def classification_error(target, output, axis=-1): return", "> 1 else ( outputs[0], ) self.trainer = C.trainer.Trainer( outputs[0],", "cntk does not support gradients as symbolic op, # to", "'channels_last': # shape: batch, row, col, filters output = permute_dimensions(output,", "str(dtype) else dtype v = C.parameter(shape=shape, init=value, dtype=dtype, name=_prepare_name(name, 'variable'))", "1) # remove dummy dimension current = squeeze(current, time_axis) output,", "return C.cntk_py.Value(root_gradients.data()) class ConvertToStatic(C.ops.functions.UserFunction): \"\"\"Converts input first axis to CNTK", "ndim_expr - ndim_cond for i in range(ndim_diff): condition = expand_dims(condition)", "= shape def infer_outputs(self): batch_axis = C.Axis.default_batch_axis() return [ C.output_variable(", "may have an extra batch axis with 1, it is", "'expected 1 or %d dimensions' % (bias_dims, dims)) if dims", "is not None: mask_slice = C.ops.slice(mask, time_axis, i, i +", "C.user_function( LambdaFunc(x, when=lambda x: True, execute=lambda x: print(message))) def batch_set_value(tuples):", "axis, keepdims) def any(x, axis=None, keepdims=False): reduce_result = sum(x, axis,", "axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_max') return", "with variable # length. Will support it in next release.", "1, 1) else: shape = (bias.shape[3],) + bias.shape[:3] elif data_format", "be fixed in GA. 
if n is C.InferredDimension or n", "= list(int_shape(x)) _axis = [] for _ in axis: if", "to padding the shape if num_dynamic_axis >= len(shape): i =", "False, padding, padding, padding]) return _postprocess_conv3d_output(x, data_format) def conv3d_transpose(x, kernel,", "is conditioned by the Numpy RNG seed = np.random.randint(10e7) np.random.seed(seed)", "kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is", "= temporal_padding(x, (left_pad, 0)) padding = 'valid' if data_format ==", "else: return False def get_num_dynamic_axis(x): return _get_dynamic_axis_num(x) def _reduce_on_axis(x, axis,", "keras shape history.' % (str(shape), nones)) # Current cntk does", "C.swapaxes(kernel, 0, 2) padding = _preprocess_border_mode(padding) strides = [strides] x", "0], [0, 0], list(padding[0]), list(padding[1])]) else: x = _padding(x, padding[0],", "+ str(ndim_cond) + ', ndim(then_expression)' '=' + str(ndim_expr)) elif ndim_cond", "= _convert_string_dtype(dtype) size = 1 for _ in shape: if", "(4, 3, 0, 1, 2)) return kernel def _postprocess_conv3d_output(x, dim_ordering):", "result if axes[0] == 1 else transpose(result) else: return sum(x", "int_shape(x) ndim = len(shape) nones = _get_dynamic_axis_num(x) if nones >", "print(message))) def batch_set_value(tuples): for t in tuples: x = t[0]", "_get_dynamic_axis_num(x) if isinstance(pattern, list): current_layout = [i for i in", "+ 1 def count_params(x): for _ in x.shape: if _", "x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1]), list(padding[2])])", "if _get_dynamic_axis_num(i_s) == 0 and _get_dynamic_axis_num(l_s) == 1: if hasattr(C,", "= [] for i in range(output_row): for j in range(output_col):", "/ norm def hard_sigmoid(x): x = (0.2 * x) +", "grad_array_view.as_shape( (num_old_batch,) + self.from_shape)) class ConvertToBatch(C.ops.functions.UserFunction): \"\"\"Converts input first axis", "== 'max': x = C.pooling( x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding])", "has_seq = True break if has_seq: nones = _get_dynamic_axis_num(x) x", "C.variables.Constant): return x.value else: return eval(x) def batch_get_value(xs): result =", "prod(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x,", "= int_shape(kernel) output_length, feature_dim, filters = kernel_shape xs = []", "x = _padding(x, padding[2], 4) else: if num_dynamic_axis > 0:", "if num_dynamic_axis > 0: assert len(base_shape) == 3 if hasattr(C,", "= C.convolution(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[False, padding, padding], groups=x.shape[0]) return _postprocess_conv2d_output(x,", "if isinstance(axes, int): axes = (axes, axes) if axes is", "self.target_shape = (batch_size,) + input.shape def infer_outputs(self): return [ C.output_variable(", "axis, please try ' 'dynamic rnn with sequence axis.' %", "!= C.FreeDimension: return False return True def __call__(self, inputs): global", "depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format) depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2,", "clipping output = C.clip(output, epsilon(), 1.0 - epsilon()) return -sum(target", "if self.trainer is None: self.metrics_outputs = [f.output for f in", "if n is inferred dimension, # we can't figure out", "raise ValueError('Multiple target dimensions are not supported. 
' + 'Expected:", "# skip the batch axis for axis in range(1, ndim(x)):", "in sorted(_axis, reverse=True): result = C.reshape(result, shape=(), begin_axis=index, end_axis=index +", "C.reduce_sum(all_matrix) else: return all_matrix def classification_error(target, output, axis=-1): return C.ops.reduce_mean(", "1 if go_backwards: i = shape[1] - 1 while i", "conv_dim3, # input_depth) x = C.transpose(x, (3, 0, 1, 2))", "constraint: Optional projection function to be applied to the variable", "1 i = 1 # add the time_step axis back", "else transpose(result) else: return sum(x * transpose(y), axis=axes[0], keepdims=True) else:", "assert len(base_shape) == 2 if hasattr(C, 'pad'): x = C.pad(x,", "isinstance(value, np.ndarray) is False: value = np.asarray(value) if isinstance(x, C.variables.Parameter):", "stddev=1.0, dtype=None, seed=None): if seed is None: seed = np.random.randint(1,", "= list(base_shape) postfix_shape[axis] = pattern[1] postfix_shape = tuple(postfix_shape) x =", "condition = expand_dims(condition) condition = tile(condition, shape_expr[ndim_cond + i]) return", "in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError( 'CNTK backend:", "int_shape(mask) if len(mask_shape) == dims - 1: mask = expand_dims(mask)", "support float32 and float64 if dtype == 'float32': return np.float32", "value.shape if hasattr(value, 'shape') else () if hasattr(value, 'dtype') and", "temporal_padding(x, padding=(1, 1)): assert len(padding) == 2 num_dynamic_axis = _get_dynamic_axis_num(x)", "ndim = len(shape) nones = _get_dynamic_axis_num(x) if nones > ndim:", "def eye(size, dtype=None, name=None): if dtype is None: dtype =", "== 2 assert len(padding[1]) == 2 assert len(padding[2]) == 2", "C.Axis.default_batch_axis() \\ and hasattr(C.sequence, reduce_fun_name): x = getattr(C.sequence, reduce_fun_name)(x, a)", "variance def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3): # The", "size = 1 for _ in shape: if _ is", "padding[0], 1) x = _padding(x, padding[1], 2) return x def", "batch axis is not in shape, # so just flatten", "mean, variance def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3): #", "None and max_value < min_value: max_value = min_value if max_value", "least ' '%d dimensions are needed.' % (len(cntk_shape, dynamic_axis_num))) if", "rep, axis): axis = _normalize_axis(axis, x) axis = axis[0] slices", "ndim_expr: raise ValueError('Rank of condition should be less' ' than", "non-square dilation_rate is ' 'not supported.') if strides != (1,", "transpose i = normalized_axis[0] while i < len(x.shape) - 1:", "if (isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)): if isinstance(value, (float, int)):", "equal to rank of then and' ' else expressions. 
ndim(condition)='", "* stride_row + kernel_size[0]) slice_col = slice(j * stride_col, j", "(str(shape), nones)) # Current cntk does not support shape like", "new_states = return_states outputs.append(output) states = new_states i -= 1", "'dynamic_axes') and len(x.dynamic_axes) > 1 def l2_normalize(x, axis=None): axis =", "False): uses_learning_phase = True if mask is not None: mask_slice", "= len(bias.shape) if bias_dims != 1 and bias_dims != dims:", "if sorted(reduction_axes) == list(range(ndim(x)))[:-1]: normalized = batch_normalization( x, mean, variant,", "axis=3) output = repeat_elements(output, width_factor, axis=4) return output elif data_format", "name=name) def infer_outputs(self): return [ C.output_variable( self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes)] def", "data_format): if data_format == 'channels_first': output = repeat_elements(x, depth_factor, axis=2)", "shape[nones:] new_shape = tuple([C.InferredDimension if _ == C.FreeDimension else _", "ValueError( 'CNTK backend: argument %s is not found in inputs.", "only supports `eval` with ' '`Function`, `Constant` or ' '`Parameter`.'", "ReshapeBatch(C.ops.functions.UserFunction): def __init__(self, input, shape, name='reshape_with_batch'): super(ReshapeBatch, self).__init__([input], as_numpy=False, name=name)", "if data_format == 'channels_last': # shape: batch, row, col, filters", "= C.relu(-x) x = C.relu(x) if max_value is not None:", "'%d dimension is not supported, at least ' '%d dimensions", "shape: (samples, rows, cols, input_depth) x = C.transpose(x, (2, 0,", "0 def _contain_seqence_axis(x): if _get_dynamic_axis_num(x) > 1: return x.dynamic_axes[1] ==", "in zip(last_states, initial_states): if _get_dynamic_axis_num(i_s) == 0 and _get_dynamic_axis_num(l_s) ==", "0: prefix_shape = list(base_shape) prefix_shape[axis] = pattern[0] prefix_shape = tuple(prefix_shape)", "batch_get_value(xs): result = [] for x in xs: if (isinstance(x,", "in new_shape] return C.reshape(x, new_shape) def permute_dimensions(x, pattern): dims =", "C.combine(self.metrics_outputs) else: self.metrics_func = None @staticmethod def _is_input_shape_compatible(input, placeholder): if", "3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), [0,", "batch_normalization( x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon) return normalized, mean,", "kernel_shape xs = [] for i in range(output_row): for j", "else: if isinstance(axis, list): has_seq = False for a in", "return _postprocess_conv2d_output(x, data_format) def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None,", "ValueError('CNTK Backend: Set learning phase ' 'with value %s is", "import defaultdict from contextlib import contextmanager import warnings C.set_global_option('align_axis', 1)", "'_keras_shape'): return x._keras_shape shape = x.shape if hasattr(x, 'dynamic_axes'): dynamic_shape", "= pool_size x = _preprocess_conv2d_input(x, data_format) if pool_mode == 'max':", "def prod(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output =", "'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) num_dynamic_axis = _get_dynamic_axis_num(x)", "' 'CNTK can not take variable length inputs. Please '", "is None: # ensure that randomness is conditioned by the", "zip(new_states, place_holders): n_s.append(o.replace_placeholders({p: o.output})) if len(n_s) > 0: new_output =", "phase flag for cntk backend. 
\"\"\" global _LEARNING_PHASE global _LEARNING_PHASE_PLACEHOLDER", "keepdims=True) else: if len(y_shape) == 2: y = expand_dims(y) normalized_axis", "self.metrics_outputs: updated.append(v) if self.unrelated_updates is not None: input_dict = {}", "version[:-1] # for hot fix, ignore all the . except", "value=1.0, name='_keras_learning_phase') # static learning phase flag, if it is", "not None and not has_seq_axis(mask): if go_backwards: mask = reverse(mask,", "C.abs(x) def sqrt(x): return C.sqrt(x) def exp(x): return C.exp(x) def", "= _preprocess_border_mode(padding) if dilation_rate == (1, 1): strides = (1,)", "False, padding]) if data_format == 'channels_last': x = C.swapaxes(x, 0,", "not self._is_input_shape_compatible(value, tensor): raise ValueError('CNTK backend: The placeholder has been", "= C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis) return x def spatial_2d_padding(x, padding=((1,", "batch_flatten(x): # cntk's batch axis is not in shape, #", "axis=axes[0], keepdims=True) else: if len(y_shape) == 2: y = expand_dims(y)", "if data_format == 'channels_first': if bias_dims == 1: shape =", "expand_dims(mask) mask = C.to_sequence_like(mask, rnn_inputs) states = tuple(initial) with C.default_options(axis_offset=1):", "add the time_step axis back output_slice = expand_dims(outputs[i], 1) final_output", ". except the first one. if len(version) > 2 and", "constants=None, unroll=False, input_length=None): shape = int_shape(inputs) dims = len(shape) global", "axis): axis = _normalize_axis(axis, x) axis = axis[0] slices =", "== 1: new_c.append(C.sequence.broadcast_as(c, rnn_inputs)) else: new_c.append(c) rnn_constants.append(new_c) else: if _get_dynamic_axis_num(constant)", "or index > 1: raise NotImplementedError new_shape = list(x.shape) new_shape.insert(index,", "stride_row, stride_col = strides output_row, output_col = output_shape kernel_shape =", "_preprocess_conv3d_kernel(kernel, dim_ordering): kernel = C.transpose(kernel, (4, 3, 0, 1, 2))", "len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0: return C.reduce_sum(any_matrix) else:", "postfix_shape = tuple(postfix_shape) x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis) return", "Returns A variable instance (with Keras metadata included). 
\"\"\" if", "const.shape const._uses_learning_phase = False return const def random_binomial(shape, p=0.0, dtype=None,", "cntk doesn't support gradient as symbolic op, to hook up", "None: dtype = floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.zeros(shape, ctype),", "*= _ binomial = np.random.binomial(1, p, size).astype(dtype).reshape(shape) return variable(value=binomial, dtype=dtype)", "tensor with keras shape: `%s` has ' '%d cntk dynamic", "[0]) if gamma is None: gamma = ones_like(var) elif ndim(gamma)", "x, strides, auto_padding=[ False, padding, padding, padding]) return _postprocess_conv3d_output(x, data_format)", "inputs self.trainer = None self.unrelated_updates = None self.updates = updates", "(2, 0, 1)) return x def _preprocess_conv2d_kernel(kernel, data_format): # As", "need_convert is False: raise NotImplementedError('CNTK Backend: `go_backwards` is not supported", "shape=v.shape, name='keras_grad_placeholder') grads.append(g) grad_parameter_dict[g] = v return grads def equal(x,", "input_length * kernel_size output = x_aggregate * weight # shape:", "1)), data_format=None): assert len(padding) == 3 assert len(padding[0]) == 2", "else: uses_learning_phase = False # CNTK currently don't support cond", "super(ReshapeBatch, self).__init__([input], as_numpy=False, name=name) self.from_shape = input.shape self.target_shape = shape", "\"eval\" in cntk. # They only evaluated in training phase.", "version = C.__version__ if version.endswith('+'): version = version[:-1] # for", "= placeholder.shape for i, p in zip(input_shape, placeholder_shape): if i", "create place holder place_holders = [C.placeholder(dynamic_axes=x.dynamic_axes) for _ in states]", "i < shape[1]: current = C.ops.slice(inputs, time_axis, i, i +", "# add the time_step axis back final_output = expand_dims(outputs[0], 1)", "axis, ' 'which is not supported. Please do permute '", "C.cross_entropy_with_softmax(output, target) # cntk's result shape is (batch, 1), while", "We may introduce this operation in CNTK native implementation later.", "in trainer, for metrics more # than 2, need manual", "fixed dimension ' 'instead of `None`.') # how to apply", "(batch, ) return C.reshape(result, ()) else: # scale preds so", "0, 1)) # Shape: (batch, filters, output_length, input_length * kernel_size)", "nones)) # Current cntk does not support shape like (1,", "def log(x): return C.log(x) def round(x): return C.round(x) def sigmoid(x):", "else: for index in sorted(_axis, reverse=True): del shape[index] shape =", "_axis = [_ + len(shape) if _ < 0 else", "i + 1) for _ in range(rep): slices.append(tmp) i +=", "(1, 1)), data_format=None): assert len(padding) == 3 assert len(padding[0]) ==", "'causal': # causal (dilated) convolution: left_pad = dilation_rate * (kernel.shape[0]", "-1 _UID_PREFIXES = defaultdict(int) # cntk doesn't support gradient as", "parameter with symbolic op, so eval it first as #", "def dropout(x, level, noise_shape=None, seed=None): if level < 0. 
or", "C.InferredDimension or _ == C.FreeDimension: raise ValueError('CNTK backend: `count_params` with", "+ scale) def random_normal_variable( shape, mean, scale, dtype=None, name=None, seed=None):", "as_numpy=False, name=name) self.target_shape = (batch_size,) + input.shape def infer_outputs(self): return", "axis = [axis] axis = _normalize_axis(axis, x) output = C.ops.argmax(x,", "== -1: i += 1 else: break shape = tuple([-1", "x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]])", "ndim(then_expression)' '=' + str(ndim_expr)) elif ndim_cond < ndim_expr: shape_expr =", "C.FreeDimension: raise ValueError('CNTK backend: `count_params` with dynamic ' 'shape is", "1: shape = (bias.shape[0], 1) else: shape = (bias.shape[1],) +", "remove the conversion when cntk supports int32, int64 # https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter", "if isinstance(training, int) or isinstance(training, bool): result = x if", "float32 and float64 if dtype == 'float32': return np.float32 elif", "= get_num_dynamic_axis(placeholder) input_shape = input.shape[num_dynamic:] placeholder_shape = placeholder.shape for i,", "(a % ndim) if _axis[i] is not None: _axis[i] =", "self.metrics_outputs = [f.output for f in outputs[2:]] self.metrics_func = C.combine(self.metrics_outputs)", "'is constructed.' % g) if len(u_list) > 0: learner =", "create gradient as a constant placeholder, here use this global", "a placeholder. # Arguments x: A candidate placeholder. # Returns", "reduce_result = sum(x, axis, keepdims=keepdims) any_matrix = C.element_select( reduce_result, ones_like(reduce_result),", "target) * C.log(1.0 - output) return output def get_variable_shape(x): return", "for s in final_states] if need_convert: final_output = C.sequence.unpack(final_output, 0,", "+ non_dyn_shape def is_sparse(tensor): return tensor.is_sparse def int_shape(x): if hasattr(x,", "output, axis=-1): return C.ops.reduce_mean( C.equal( argmax( output, axis=-1), argmax( target,", "strides, auto_padding=[ False, padding, padding, padding]) return _postprocess_conv3d_output(x, data_format) def", "in constants: if isinstance(constant, list): new_c = [] for c", "+ C.abs(x)) def categorical_crossentropy(target, output, from_logits=False): if from_logits: result =", "str(data_format)) stride_row, stride_col = strides output_row, output_col = output_shape kernel_shape", "0 while i < shape[1]: current = C.ops.slice(inputs, time_axis, i,", "inputs that have a static shape.' 
% (str(tensor.shape), str(value.shape))) feed_dict[tensor]", "o in self.metrics_outputs: value = output_values[o] v = value.asarray() updated.append(v)", "x.shape dim = np.prod(x.shape) x = C.reshape(x, (-1,)) x._keras_shape =", "data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0],", "_ in cntk_axes] strides = [-1 for _ in cntk_axes]", "dtype=dtype) def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): for _ in", "shape=cntk_shape, dtype=_convert_string_dtype(dtype), is_sparse=sparse, name=name) x._keras_shape = shape x._uses_learning_phase = False", "if shape[i + num_dynamic] is None: non_dyn_shape.append(x.shape[i]) else: non_dyn_shape.append(shape[i +", "in {0, 1} else _LEARNING_PHASE_PLACEHOLDER def set_learning_phase(value): global _LEARNING_PHASE if", "None: # ensure that randomness is conditioned by the Numpy", "final_output = _reshape_sequence(final_output, num_time_step) f_stats = [] for l_s, i_s", "data_format=None): assert len(padding) == 3 assert len(padding[0]) == 2 assert", "i += 1 last_output._uses_learning_phase = uses_learning_phase return last_output, final_output, states", "index in sorted(_axis, reverse=True): result = C.reshape(result, shape=(), begin_axis=index, end_axis=index", "def get_value(x): if isinstance( x, C.variables.Parameter) or isinstance( x, C.variables.Constant):", "= expand_dims(mask) mask = C.to_sequence_like(mask, rnn_inputs) states = tuple(initial) with", "= (a % ndim) if _axis[i] is not None: _axis[i]", "outputs[1]) if len(outputs) > 1 else ( outputs[0], ) self.trainer", "None: beta = zeros_like(mean) elif ndim(beta) == ndim(x) and shape(beta)[0]", "C.AVG_POOLING, pool_size, strides, auto_padding=[padding]) else: raise ValueError('Invalid pooling mode: '", "C import numpy as np from .common import floatx, epsilon,", "add support # in native cntk op cntk_axis = []", "x, strides=(1, 1, 1), auto_padding=[False]) return _postprocess_conv2d_output(x, data_format) def depthwise_conv2d(x,", "x = (0.2 * x) + 0.5 x = C.clip(x,", "`%s`, but input shape is `%s`. 
Currently ' 'CNTK can", "ValueError('CNTK Backend: Invalid data_format:', data_format) def repeat_elements(x, rep, axis): axis", "(0.2 * x) + 0.5 x = C.clip(x, 0.0, 1.0)", "softsign(x): return x / (1 + C.abs(x)) def categorical_crossentropy(target, output,", "'channels_last': # TF uses the last dimension as channel dimension,", "len(outputs) == 0: prev_output = zeros_like(output) else: prev_output = outputs[-1]", "name=name) def ones(shape, dtype=None, name=None): if dtype is None: dtype", "alpha != 0.: x -= alpha * negative_part return x", "manual eval elif len(outputs) > 2: self.metrics_outputs = [f.output for", "C.abs(x)) def categorical_crossentropy(target, output, from_logits=False): if from_logits: result = C.cross_entropy_with_softmax(output,", "name: name of this node \"\"\" def __init__(self, input, name='convert_to_batch'):", "learning_phase() uses_learning_phase = True else: uses_learning_phase = False # CNTK", "if max_value is not None and max_value < min_value: max_value", "C.pooling( x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding]) else: raise ValueError('Invalid pooling", "cos(x): return C.cos(x) def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): if", "if isinstance(_axis, list): for i, a in enumerate(_axis): if a", "for c in constant: if _get_dynamic_axis_num(c) == 1: new_c.append(C.sequence.broadcast_as(c, rnn_inputs))", "= repeat_elements(output, height_factor, axis=3) output = repeat_elements(output, width_factor, axis=4) return", "strides, auto_padding=[padding]) else: raise ValueError('Invalid pooling mode: ' + str(pool_mode))", ") self.trainer = C.trainer.Trainer( outputs[0], criterion, [learner]) self.trainer_output = tuple([f.output", "'to shape `%s`, but input shape is `%s`. Currently '", "shape[1]: current = C.ops.slice(inputs, time_axis, i, i + 1) #", "else: x = _padding(x, padding[0], 0) x = _padding(x, padding[1],", "axis if isinstance(_axis, list): for i, a in enumerate(_axis): if", "1) return x def _padding(x, pattern, axis): base_shape = x.shape", "raise ValueError('Input should be at least 3D.') # if the", "implementation later. # Arguments inputs: a cntk tensor which has", "beta = zeros_like(x) else: beta = zeros_like(gamma) mean, variant =", "= int_shape(x) num_dynamic_axis = _get_dynamic_axis_num(x) # Padding the axis if", "dims for proper broadcasting. for axis in _axes: shift =", "_postprocess_conv2d_output(x, data_format) def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1):", "output_shape = tuple(shape) x = C.convolution_transpose( kernel, x, strides, auto_padding=[", "len(padding) == 2 num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if", "_preprocess_conv3d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = (1,) + strides", "return C.parameter( shape=shape, init=C.initializer.normal( scale=scale, seed=seed), dtype=dtype, name=name) def random_normal(shape,", "'dynamic rnn with sequence axis.' 
% shape) if constants is", "1): strides = (1,) + strides x = C.convolution(depthwise_kernel, x,", "ValueError('Unknown data_format ' + str(data_format)) num_dynamic_axis = _get_dynamic_axis_num(x) base_shape =", "= _preprocess_border_mode(padding) x = _preprocess_conv3d_input(x, data_format) if pool_mode == 'max':", "get_num_dynamic_axis(x): return _get_dynamic_axis_num(x) def _reduce_on_axis(x, axis, reduce_fun_name): if isinstance(axis, list):", "if num_time_step is not None and num_time_step is not C.FreeDimension:", "\\ and hasattr(C.sequence, reduce_fun_name): x = getattr(C.sequence, reduce_fun_name)(x, a) else:", "= C.ops.argmax(x, axis=axis[0]) return _reshape_dummy_dim(output, axis) def argmin(x, axis=-1): axis", "del shape[_] new_shape = shape[nones:] new_shape = tuple([C.InferredDimension if _", "1: new_c.append(C.sequence.broadcast_as(c, rnn_inputs)) else: new_c.append(c) rnn_constants.append(new_c) else: if _get_dynamic_axis_num(constant) ==", "len(tensors) == 0: return None axis = [axis] axis =", "def dtype(x): return _convert_dtype_string(x.dtype) def zeros(shape, dtype=None, name=None): if dtype", "len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[list(padding[0]),", "x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon) return normalized, mean, variant", "= image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown", "forward(self, argument, device=None, outputs_to_retain=None): if self.when(argument): self.execute(argument) return None, argument", "input of static rnn ' 'has shape `%s`, the second", "result = [] for x in xs: if (isinstance(x, C.variables.Parameter)", "shape = (bias.shape[0], 1) else: shape = (bias.shape[1],) + bias.shape[:1]", "(learning_phase == 1) or test mode (learning_phase == 0). #", "else: shape = bias.shape elif dims == 2: if data_format", "outputs[0] while i < len(outputs): # add the time_step axis", "past_values.append(C.sequence.past_value(p, s)) new_output, new_states = step_function( x, tuple(past_values) + tuple(rnn_constants))", "supported, ' 'expected 0 or 1.' % value) _LEARNING_PHASE =", "0)) padding = 'valid' if data_format == 'channels_last': x =", "a bool tensor used to run Keras models in #", "C.square(C.minus(x, shift)) for axis in _axes: variance_mean = C.reduce_mean(variance_mean, axis=axis)", "bias_dims == 1: shape = (1, 1, bias.shape[0]) else: shape", "dtype = _convert_string_dtype(dtype) return C.parameter( shape, init=C.initializer.truncated_normal( stddev, seed=seed), dtype=dtype)", "value in zip(self.placeholders, inputs): # cntk only support calculate on", "def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None): assert", "eye(size, dtype=None, name=None): if dtype is None: dtype = floatx()", "this is not expected, please ' 'double check the keras", "static axis. 
Overlapping source fragments from the Keras CNTK backend (keras/backend/cntk_backend.py), heavily duplicated and shredded out of order. The recoverable topics are: variable, constant and placeholder creation; the scalar learning-phase flag (_LEARNING_PHASE_PLACEHOLDER) and in_train_phase / in_test_phase switching; dtype conversion helpers; reshape, batch-axis and dynamic-axis utilities (ReshapeBatch, ConvertToBatch, ConvertToStatic); reduction, normalization and moments helpers; RNN stepping with masks and sequence axes; conv1d/conv2d/conv3d, depthwise and transposed convolutions with kernel-layout preprocessing; pooling, padding and bias_add; loss functions (categorical, sparse categorical and binary crossentropy); and the Function/trainer wrapper that groups parameter updates and metrics outputs.
# TH input shape: (samples, input_depth, rows, cols) #", "by gradient place holder u_ops = [] unrelated_updates = []", "[x] * n return C.splice(*temp, axis=index) def tanh(x): return C.tanh(x)", "+ bias.shape[:1] elif data_format == 'channels_last': if bias_dims == 1:", "isinstance(a, C.Axis): has_seq = True break if has_seq: nones =", "1 or training is True else alt else: result =", "= step_function( x, tuple(past_values) + tuple(rnn_constants)) if getattr(new_output, '_uses_learning_phase', False):", "variables) else: return C.stop_gradient(variables) def switch(condition, then_expression, else_expression): ndim_cond =", "!= dims: raise ValueError('Unexpected bias dimensions %d, ' 'expected 1", "= C.ops.slice(mask, time_axis, i, i + 1) mask_slice = squeeze(mask_slice,", "ValueError('Invalid strides for dilated convolution') x = C.convolution(depthwise_kernel, x, strides=dilation_rate[0],", "+ str(data_format)) if padding == 'causal': # causal (dilated) convolution:", "result def in_test_phase(x, alt, training=None): return in_train_phase(alt, x, training=training) def", "= [] for a in axis: if isinstance(a, C.Axis) is", "learning phase is a bool tensor used to run Keras", "data_format is None: data_format = image_data_format() if data_format not in", "Numpy RNG seed = np.random.randint(10e7) if dtype is None: dtype", "random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): for _ in shape: if", "expects `(depth, input_depth, rows, cols)`. kernel = C.transpose(kernel, (3, 2,", "training == 1 or training is True else alt else:", "minimum(x, y): return C.element_min(x, y) def sin(x): return C.sin(x) def", "# static learning phase flag, if it is not 0", "== C.Axis.default_dynamic_axis() else: return False def get_num_dynamic_axis(x): return _get_dynamic_axis_num(x) def", "from __future__ import print_function import cntk as C import numpy", "groups=x.shape[0]) return _postprocess_conv2d_output(x, data_format) def conv3d(x, kernel, strides=(1, 1, 1),", "None: dtype = floatx() if seed is None: # ensure", "topN=k) return 1 - C.reshape(result, shape=()) def conv2d_transpose(x, kernel, output_shape,", "can't support input with variable # length. 
Will support it", "int return x def dot(x, y): if len(x.shape) > 2", "return C.square(x) def abs(x): return C.abs(x) def sqrt(x): return C.sqrt(x)", "padding[1], 2) return x def spatial_3d_padding(x, padding=((1, 1), (1, 1),", "C.device.use_default_device() if dev.type() == 0: warnings.warn( 'CNTK backend warning: GPU", "TODO: remove the conversion when cntk supports int32, int64 #", "for _ in range(rep): slices.append(tmp) i += 1 return C.splice(*slices,", "if isinstance( value, C.variables.Constant) or isinstance( value, C.variables.Parameter): value =", "1 or %d dimensions' % (bias_dims, dims)) if dims ==", "https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter dtype = 'float32' if 'int' in str(dtype) else dtype", "> 2: y_shape = int_shape(y) if len(y_shape) > 2: permutation", "if hasattr(x, 'dynamic_axes'): dynamic_shape = [None for a in x.dynamic_axes]", "infer_outputs(self): batch_axis = C.Axis.default_batch_axis() return [ C.output_variable( self.inputs[0].shape[1:], self.inputs[0].dtype, [batch_axis])]", "depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format", "= (1, 1, 1, bias.shape[0]) else: shape = bias.shape elif", "list(x.shape) _axis = [_ + len(shape) if _ < 0", "' + str(data_format)) stride = strides[0] kernel_shape = int_shape(kernel) output_length,", "apply broadcast weight = permute_dimensions(kernel, (2, 0, 1)) # Shape:", "dynamic ' 'shape is not supported. Please provide ' 'fixed", "def pool2d(x, pool_size, strides=(1, 1), padding='valid', data_format=None, pool_mode='max'): if data_format", "0 self.loss = outputs[0] # need group update by gradient", "resize_volumes(x, depth_factor, height_factor, width_factor, data_format): if data_format == 'channels_first': output", "o.output})) if len(n_s) > 0: new_output = n_s[0] return new_output,", "(num_old_batch,) + self.from_shape)) class ConvertToBatch(C.ops.functions.UserFunction): \"\"\"Converts input first axis to", "need this check. if (self.unrelated_updates is None and (_LEARNING_PHASE_PLACEHOLDER.value ==", "C.variables.Constant)): if isinstance(value, (float, int)): value = np.full(x.shape, value, dtype=floatx())", "# if n is inferred dimension, # we can't figure", "_axis[i] is not None: _axis[i] = cntk_axis[_axis[i]] else: if _axis", "C.cntk_py.Value( grad_array_view.as_shape( (num_old_batch,) + self.from_shape)) class ConvertToBatch(C.ops.functions.UserFunction): \"\"\"Converts input first", "def name_scope(name): global NAME_SCOPE_STACK NAME_SCOPE_STACK.append(name) yield NAME_SCOPE_STACK.pop() def get_uid(prefix=''): _UID_PREFIXES[prefix]", "CNTK native implementation later. # Arguments inputs: a cntk variable", "_static_rnn( step_function, inputs, initial_states, go_backwards, mask, constants, unroll, input_length) if", "= _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_min') return _remove_dims(output,", "C.clip(x, min_value, max_value) def binary_crossentropy(target, output, from_logits=False): if from_logits: output", "beta = zeros_like(mean) elif ndim(beta) == ndim(x) and shape(beta)[0] ==", "if beta is None: if gamma is None: beta =", "floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.ones(shape, ctype), dtype=dtype, name=name) def", "strides=1, padding='valid', data_format=None, dilation_rate=1): if data_format is None: data_format =", "of the 2nd one. 
# TH input shape: (samples, input_depth,", "3)), (-1, 1) + depthwise_kernel.shape[2:]) pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format) padding", "1, 2)) return x def _preprocess_conv3d_kernel(kernel, dim_ordering): kernel = C.transpose(kernel,", "list(base_shape) postfix_shape[axis] = pattern[1] postfix_shape = tuple(postfix_shape) x = C.splice(x,", "i += 1 return x def _normalize_axis(axis, x): shape =", "device=None, outputs_to_retain=None): if self.when(argument): self.execute(argument) return None, argument def backward(self,", "padding = _preprocess_border_mode(padding) strides = strides pool_size = pool_size x", "i = 0 while i < shape[1]: current = C.ops.slice(inputs,", "else s for s in shape] cntk_shape = tuple(cntk_shape) if", "static rnn ' 'has shape `%s`, the second axis '", "def identity(x, name=None): if name is None: name = '%s_alias'", "n): # this is a workaround for recurrent layer #", "else: prev_output = outputs[-1] output = C.ops.element_select(mask_slice, output, prev_output) return_states", "shape[i] is None and dynamic_axis_index < nones: cntk_axis.append(x.dynamic_axes[dynamic_axis_index]) dynamic_axis_index +=", "initial.append(s) need_convert = not has_seq_axis(inputs) if go_backwards and need_convert is", "not None: new_states = [C.element_select(m, n, s) for n, s", "[-1 for _ in cntk_axes] return C.slice(x, cntk_axes, begin_index, end_index,", "x for index in sorted(_axis, reverse=True): result = C.reshape(result, shape=(),", "while i < len(x.shape) - 1: x = C.swapaxes(x, i,", "isinstance(pattern, list): current_layout = [i for i in range(dims)] else:", "'channels_last': output = repeat_elements(x, height_factor, axis=1) output = repeat_elements(output, width_factor,", "version.endswith('+'): version = version[:-1] # for hot fix, ignore all", "cntk backend. \"\"\" global _LEARNING_PHASE global _LEARNING_PHASE_PLACEHOLDER _LEARNING_PHASE = -1", "= pattern[1] postfix_shape = tuple(postfix_shape) x = C.splice(x, C.constant(value=0, shape=postfix_shape),", "= C.convolution_transpose( kernel, x, strides, auto_padding=[ False, padding, padding, padding],", "for _ in states] past_values = [] for s, p", "padding.' % base_shape) if pattern[0] > 0: prefix_shape = list(base_shape)", "str(data_format)) x = _preprocess_conv3d_input(x, data_format) kernel = _preprocess_conv3d_kernel(kernel, data_format) padding", "= 'float32' if 'int' in str(dtype) else dtype v =", "= list(shape) new_shape = new_shape[num_dynamic_axis:] new_shape = [C.InferredDimension if _", "u in u_ops]) grads = update_func.find_all_with_name('keras_grad_placeholder') u_list = [] p_list", "batch axis batch_size: size of batch axis. name: name of", "ValueError('Invalid border mode: ' + str(padding)) return padding def _postprocess_conv2d_output(x,", "axis=axes[0], keepdims=True) return result if axes[0] == 1 else transpose(result)", "type. name: Optional name string for the tensor. constraint: Optional", "* kernel_size) output = x_aggregate * weight # Shape: (batch,", "strides = (1,) + strides x = C.convolution( kernel, x,", "0 for dim in base_shape]): raise ValueError('CNTK Backend: padding input", "is None: name = '%s_alias' % x.name return C.alias(x, name=name)", "ConvertToBatch(C.ops.functions.UserFunction): \"\"\"Converts input first axis to CNTK batch axis. 
We", "include batch axis output_shape = output_shape[1:] # in keras2, need", "in axis: if isinstance(a, C.Axis): has_seq = True break if", "reduce_axes.append(a) return _reshape_dummy_dim(x, reduce_axes) else: if isinstance(axis, list): has_seq =", "len(padding[1]) == 2 assert len(padding[2]) == 2 if data_format is", "> 0: value = value.astype(dtype) # TODO: remove the conversion", "len(shape) + 1 shape.insert(index, 1) new_shape = shape[nones:] new_shape =", "and' ' else expressions. ndim(condition)=' + str(ndim_cond) + ', ndim(then_expression)'", "if n is C.InferredDimension or n is C.FreeDimension: return x", "zeros(shape, dtype=None, name=None): if dtype is None: dtype = floatx()", "s)) new_states = return_states outputs.append(output) states = new_states i -=", "name=None, constraint=None): \"\"\"Instantiates a variable and returns it. # Arguments", "by the Numpy RNG seed = np.random.randint(10e3) if dtype is", "x = _padding(x, padding[1], 1) x = _padding(x, padding[2], 2)", "need broadcasting target_shape = [] x_shape = int_shape(x) # skip", "1 return C.splice(*slices, axis=axis) def repeat(x, n): # this is", "go_backwards: mask = reverse(mask, 1) if len(int_shape(mask)) == 2: mask", "[])] def forward(self, arguments, device=None, outputs_to_retain=None): return None, C.cntk_py.Value(arguments.data()) def", "if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError('CNTK", "len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[(0,", "= C.element_select( reduce_result, ones_like(reduce_result), zeros_like(reduce_result)) if len(reduce_result.shape) == 0 and", "else: rnn_constants = constants if mask is not None and", "last_output._uses_learning_phase = uses_learning_phase return last_output, final_output, states def rnn(step_function, inputs,", "= C.swapaxes(x, i, i + 1) i += 1 i", "_preprocess_border_mode(padding): if padding == 'same': padding = True elif padding", "None: name = '' return C.parameter( shape=shape, init=C.initializer.normal( scale=scale, seed=seed),", "x.shape[0] == C.InferredDimension: dims -= 1 bias_dims = len(bias.shape) if", "if axes[0] == axes[1]: result = sum(x * y, axis=axes[0],", "shape) def eval(x): if isinstance(x, C.cntk_py.Function): return x.eval() elif isinstance(x,", "convolution') x = C.convolution(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[False, padding, padding]) x", "in x.shape: if _ == C.InferredDimension or _ == C.FreeDimension:", "dynamic_axis_index < nones: cntk_axis.append(x.dynamic_axes[dynamic_axis_index]) dynamic_axis_index += 1 else: cntk_axis.append(i -", "self.metrics_func = C.combine(self.metrics_outputs) # cntk only could handle loss and", "placeholder_shape): if i != p and p != C.InferredDimension and", "== 1: rnn_constants.append(C.sequence.broadcast_as(constant, rnn_inputs)) else: rnn_constants.append(constant) else: rnn_constants = constants", "CNTK batch axis. We may introduce this operation in CNTK", "else: shape = bias.shape else: shape = bias.shape return x", "output_shape[1] output_shape = tuple(shape) x = C.convolution_transpose( kernel, x, strides,", "y): return C.element_max(x, y) def minimum(x, y): return C.element_min(x, y)", "x.value else: raise ValueError('CNTK Backend: `eval` method on ' '`%s`", "shape = (bias.shape[0], 1, 1) else: shape = (bias.shape[2],) +", "index < nones: result._keras_shape = shape return result def squeeze(x,", "expected, please ' 'double check the keras shape history.' %", "which may cause crash. 
# We have made a fix", "increment): result = x + increment return C.assign(x, result) def", "= C.ops.one_hot(indices, num_classes) return C.times(one_hot_matrix, reference, output_rank=len(reference.shape) - 1) def", "in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) if", "axis with batch axis if b_any(_ == C.InferredDimension for _", "'double check the keras shape history.' % (str(shape), nones)) #", "def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): if gamma is None:", "ValueError('Unknown data_format ' + str(data_format)) if padding == 'causal': #", "return int_shape(x) def update(x, new_x): return C.assign(x, new_x) def moving_average_update(variable,", "= strides + (strides[0],) x = C.convolution( kernel, x, strides,", "global uses_learning_phase uses_learning_phase = True if m is not None:", "return 0 def _contain_seqence_axis(x): if _get_dynamic_axis_num(x) > 1: return x.dynamic_axes[1]", "unroll=False, input_length=None): shape = int_shape(inputs) dims = len(shape) uses_learning_phase =", "C.reduce_mean(shifted_mean, axis=axis) variance_mean = C.square(C.minus(x, shift)) for axis in _axes:", "data_format == 'channels_last': x = C.transpose(x, (1, 2, 0)) return", "= -1 _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0) def in_train_phase(x, alt, training=None): global", "def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None): if data_format is None:", "ndim: raise ValueError('CNTK Backend: tensor with keras shape: `%s` has", "+ 1) return result else: for index in sorted(_axis, reverse=True):", "return x else: # if _LEARNING_PHASE is static if isinstance(training,", "C.transpose(kernel, (4, 3, 0, 1, 2)) return kernel def _postprocess_conv3d_output(x,", "= C.combine([_.output for _ in unrelated_updates]) if self.trainer is None:", "strides, auto_padding=[ False, padding, padding], output_shape=output_shape) return _postprocess_conv2d_output(x, data_format) def", "= C.transpose(y, perm=permutation) return C.times(x, y, len(y_shape) - 1) else:", "implementation if hasattr(C, 'unpack_batch') and _get_cntk_version() >= 2.2: const_a =", "nones = _get_dynamic_axis_num(x) x = expand_dims(x, nones) return x def", "cntk_shape[dynamic_axis_num:] x = C.input( shape=cntk_shape, dtype=_convert_string_dtype(dtype), is_sparse=sparse, name=name) x._keras_shape =", "CNTK currently don't support cond op, so here we use", "True else: uses_learning_phase = False # CNTK currently don't support", "def round(x): return C.round(x) def sigmoid(x): return C.sigmoid(x) def sign(x):", "raise ValueError('CNTK Backend: the input of static rnn ' 'has", "= squeeze(mask_slice, 1) if len(outputs) == 0: prev_output = zeros_like(output)", "def update_add(x, increment): result = x + increment return C.assign(x,", "1: result = x for index in sorted(_axis, reverse=True): result", "x -= alpha * negative_part return x def dropout(x, level,", "ValueError('CNTK backend: the permute pattern %s ' 'requested permute on", "bias.shape[0]) else: shape = bias.shape else: shape = bias.shape return", "axis=None, keepdims=False): return log(sum(exp(x), axis=axis, keepdims=keepdims)) def var(x, axis=None, keepdims=False):", "mask_shape = int_shape(mask) if len(mask_shape) == dims - 1: mask", "hasattr(C, 'to_batch'): initial.append(C.to_batch(s)) else: initial.append(C.user_function(ConvertToBatch(s))) else: initial.append(s) need_convert = not", "0: assert len(base_shape) == 4 if hasattr(C, 'pad'): x =", "return C.element_select(C.greater(x, 
0), res, alpha * res) def in_top_k(predictions, targets,", "else: x = getattr(C, reduce_fun_name)(x, a) else: x = getattr(C,", "has only rank %d ' 'Need at least rank 3", "None: dtype = floatx() if not shape: if ndim: shape", "_LEARNING_PHASE is static if isinstance(training, int) or isinstance(training, bool): result", "or shape[i] == -1: i += 1 else: break shape", "(bias.shape[3],) + bias.shape[:3] elif data_format == 'channels_last': if bias_dims ==", "_, feature_dim, filters = kernel_shape xs = [] for i", "hasattr(x, '_keras_shape'): return x._keras_shape shape = x.shape if hasattr(x, 'dynamic_axes'):", "axis=4) return output elif data_format == 'channels_last': output = repeat_elements(x,", "all the dim in x.shape dim = np.prod(x.shape) x =", "# Shape: (batch, output_length, filters) return permute_dimensions(output, (0, 2, 1))", "new_shape = list(x.shape) new_shape.insert(index, 1) new_shape = tuple(new_shape) x =", "+ kernel_size[0]) slice_col = slice(j * stride_col, j * stride_col", "stddev return random_normal_variable(shape=shape, mean=mean, scale=1.0, seed=seed) def truncated_normal(shape, mean=0.0, stddev=1.0,", "if bias_dims == 1: shape = (1, 1, 1, bias.shape[0])", "initial_states: if _get_dynamic_axis_num(s) == 0: if hasattr(C, 'to_batch'): initial.append(C.to_batch(s)) else:", "[axis] axis = _normalize_axis(axis, tensors[0]) return C.splice(*tensors, axis=axis[0]) def flatten(x):", "cntk gather op which may cause crash. # We have", "be processed by broadcast # so it may have an", "beta, reduction_axes, epsilon=1e-3): if gamma is None: if beta is", "_LEARNING_PHASE_PLACEHOLDER: _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(value) else: # in current version cntk", "not shape: if ndim: shape = tuple([None for _ in", "CNTK 2.1 release. # Will update with gather op in", "we use # element_select approach as workaround. It may have", "expressions. 
ndim(condition)=' + str(ndim_cond) + ', ndim(then_expression)' '=' + str(ndim_expr))", "it work, call # \"forward\" method to let cntk know", "else len(shape) + 1 shape.insert(index, 1) new_shape = shape[nones:] new_shape", "seed=seed), dtype=dtype, name=name) return variable(value=p.value + low + scale) def", "inputs.shape[0] initial = [] for s in initial_states: if _get_dynamic_axis_num(s)", "= [] for s in initial_states: if _get_dynamic_axis_num(s) == 0:", "hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2]),", "C.sigmoid(x) def sign(x): return x / C.abs(x) def pow(x, a):", "height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return output else:", "is None: min_value = -np.inf return C.clip(x, min_value, max_value) def", "return x def one_hot(indices, num_classes): return C.one_hot(indices, num_classes) def get_value(x):", "last_output, final_output, states def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None,", "reduce_fun_name)(x, a) else: x = getattr(C, reduce_fun_name)(x, a) else: x", "_reshape_dummy_dim(x, reduce_axes) else: if isinstance(axis, list): has_seq = False for", "in current version cntk can't support input with variable #", "squeeze(current, 1) output, new_states = step_function( current, tuple(states) + tuple(constants))", "== 'channels_first': if num_dynamic_axis > 0: assert len(base_shape) == 3", "to output_filters first, to apply broadcast weight = permute_dimensions(kernel, (2,", "squeeze(mean, _axes) variance = squeeze(variance, _axes) return mean, variance def", ">= 0: current = C.ops.slice(inputs, time_axis, i, i + 1)", "= input.shape[num_dynamic:] placeholder_shape = placeholder.shape for i, p in zip(input_shape,", "cols) # TF input shape: (samples, rows, cols, input_depth) x", "for o in self.trainer_output: updated.append(outputs[o]) if self.metrics_func is not None:", "range(len(shape) - len(n))]) + n if len(n) != len(shape): raise", "return None, C.cntk_py.Value(result) def backward(self, state, root_gradients): grad_array_view = root_gradients.data()", "a cntk variable (parameter/constant) name: name of this node \"\"\"", "expand_dims(outputs[i], 1) final_output = C.splice(final_output, output_slice, axis=time_axis) last_output = outputs[i]", "raise NotImplementedError new_shape = list(x.shape) new_shape.insert(index, 1) new_shape = tuple(new_shape)", "= feed_dict[argument] else: raise ValueError( 'CNTK backend: argument %s is", "if dev.type() == 0: warnings.warn( 'CNTK backend warning: GPU is", "the tensor. constraint: Optional projection function to be applied to", "= np.random.randint(10e7) np.random.seed(seed) if dtype is None: dtype = np.float32", "is True: x._uses_learning_phase = uses_learning_phase return x else: # if", "y): if len(x.shape) > 2 or len(y.shape) > 2: y_shape", "go_backwards and need_convert is False: raise NotImplementedError('CNTK Backend: `go_backwards` is", "axis=index) def tanh(x): return C.tanh(x) def _static_rnn(step_function, inputs, initial_states, go_backwards=False,", "workaround # here to mapping the correct axis. Will remove", "getattr(output, '_uses_learning_phase', False): uses_learning_phase = True if mask is not", "shape `%s`, but input shape is `%s`. 
Currently ' 'CNTK", "% (len(cntk_shape, dynamic_axis_num))) if name is None: name = ''", "1: return x.dynamic_axes[1] == C.Axis.default_dynamic_axis() else: return False def get_num_dynamic_axis(x):", "the batch axis for axis in range(1, ndim(x)): if axis", "epsilon(), 1.0 - epsilon()) output = -target * C.log(output) -", "'_cntk_placeholder') and x._cntk_placeholder def is_keras_tensor(x): if not is_tensor(x): raise ValueError('Unexpectedly", "bias.shape[:3] elif data_format == 'channels_last': if bias_dims == 1: shape", "self.loss.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise", "if the second axis is static axis, CNTK will do", "for i in range(ndim_diff): condition = expand_dims(condition) condition = tile(condition,", "static axis.' % pattern) axis = list(pattern) axis = axis[num_dynamic_axis:]", "def conv2d_transpose(x, kernel, output_shape, strides=(1, 1), padding='valid', data_format=None): if data_format", "current_layout[:num_dynamic_axis]: raise ValueError('CNTK backend: the permute pattern %s ' 'requested", "def infer_outputs(self): batch_axis = C.Axis.default_batch_axis() return [ C.output_variable( self.inputs[0].shape[1:], self.inputs[0].dtype,", "x = C.pooling( x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding]) else: raise", "1 # add the time_step axis back final_output = expand_dims(outputs[0],", "axis): base_shape = x.shape if b_any([dim < 0 for dim", "_UID_PREFIXES = defaultdict(int) # cntk doesn't support gradient as symbolic", "broadcast # so it may have an extra batch axis", "kernel = _preprocess_conv3d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = (1,)", "else: return eval(x) def batch_get_value(xs): result = [] for x", "= len(shape) uses_learning_phase = False if dims < 3: raise", "grads def equal(x, y): return C.equal(x, y) def not_equal(x, y):", "getattr(C, reduce_fun_name)(x, a) else: x = getattr(C, reduce_fun_name)(x, axis) return", "= -1 _UID_PREFIXES = defaultdict(int) # cntk doesn't support gradient", "x_shape = int_shape(x) # skip the batch axis for axis", "# shape: batch, row, col, filters output = permute_dimensions(output, (0,", "sign(x): return x / C.abs(x) def pow(x, a): return C.pow(x,", "(0, 2, 3, 1)) return output def reverse(x, axes): if", "0 else len(shape) + 1 shape.insert(index, 1) new_shape = shape[nones:]", "data_format): if data_format == 'channels_last': # TF uses the last", "does not include batch axis output_shape = output_shape[1:] # in", "x, C.variables.Parameter) or isinstance( x, C.variables.Constant): return x.value else: return", "to rank of then and' ' else expressions. ndim(condition)=' +", "normalized_axis.append(_normalize_axis(axes[1], y)[0]) # transpose i = normalized_axis[0] while i <", "[] if mask is not None: mask_shape = int_shape(mask) if", "1) else: return C.times(x, y) def batch_dot(x, y, axes=None): x_shape", "Keras 2.0.0, all kernels are normalized # on the format", "separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if", "exp(x): return C.exp(x) def log(x): return C.log(x) def round(x): return", "is `%s`. 
Currently ' 'CNTK can not take variable length", "num_time_step = inputs.shape[0] initial = [] for s in initial_states:", "of `None`.') return random_uniform_variable(shape, minval, maxval, dtype, seed) def random_uniform_variable(shape,", "- momentum)) def update_add(x, increment): result = x + increment", "raise ValueError('Rank of condition should be less' ' than or", "1) if len(outputs) == 0: prev_output = zeros_like(output) else: prev_output", "pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max'): if data_format", "shape_expr = int_shape(then_expression) ndim_diff = ndim_expr - ndim_cond for i", "should be less' ' than or equal to rank of", "= C.clip(x, 0.0, max_value) if alpha != 0.: x -=", "'Provided: ' + str(axes)) if len(x_shape) == 2 and len(y_shape)", "seed = np.random.randint(1, 10e6) if dtype is None: dtype =", "if data_format == 'channels_first': output = repeat_elements(x, depth_factor, axis=2) output", "list): _axis = list(axis) else: _axis = axis if isinstance(_axis,", "hasattr(x, '_keras_history') def is_tensor(x): return isinstance(x, (C.variables.Constant, C.variables.Variable, C.variables.Parameter, C.ops.functions.Function))", "backend does not support ' 'collapse of batch axis with", "in cntk now # return the same x to take", "y = C.transpose(y, perm=permutation) return C.times(x, y, len(y_shape) - 1)", "range(num_dynamic_axis - i)]) + shape new_shape = list(shape) new_shape =", "collections import defaultdict from contextlib import contextmanager import warnings C.set_global_option('align_axis',", "def constant(value, dtype=None, shape=None, name=None): if dtype is None: dtype", "1): raise ValueError('Invalid strides for dilated convolution') x = C.convolution(depthwise_kernel,", "abs(x): return C.abs(x) def sqrt(x): return C.sqrt(x) def exp(x): return", "np.float32 def _convert_dtype_string(dtype): if dtype == np.float32: return 'float32' elif", "if bias_dims == 1: shape = (1, 1, bias.shape[0]) else:", "= batch_normalization( x, mean, variant, beta, gamma, epsilon) else: #", "auto_padding=[ False, padding, padding]) else: assert dilation_rate[0] == dilation_rate[1] assert", "if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) >", "ValueError('CNTK backend: The placeholder has been resolved ' 'to shape", "if bias_dims == 1: shape = (bias.shape[0], 1) else: shape", "/= C.reduce_sum(output, axis=-1) # avoid numerical instability with epsilon clipping", "variable (parameter/constant) name: name of this node \"\"\" def __init__(self,", "shifted_mean = C.reduce_mean(shifted_mean, axis=axis) variance_mean = C.square(C.minus(x, shift)) for axis", "range(output_row): for j in range(output_col): slice_row = slice(i * stride_row,", "if _LEARNING_PHASE is static if isinstance(training, int) or isinstance(training, bool):", "None and not has_seq_axis(mask): if go_backwards: mask = reverse(mask, 1)", "_axis[i] = (a % ndim) if _axis[i] is not None:", "the model return np.float32 def _convert_dtype_string(dtype): if dtype == np.float32:", "not supported. 
Please give fixed ' 'dimension to enable padding.'", "global uses_learning_phase uses_learning_phase = False if dims < 3: raise", "x) return C.transpose(x, axis) def resize_images(x, height_factor, width_factor, data_format): if", "self.trainer is None: self.metrics_outputs = [f.output for f in outputs]", "m is not None: new_states = [C.element_select(m, n, s) for", "not needed # in cntk, need to remove those dummy", "y) def maximum(x, y): return C.element_max(x, y) def minimum(x, y):", "import floatx, epsilon, image_dim_ordering, image_data_format from collections import defaultdict from", "but not catched in CNTK 2.1 release. # Will update", "1: var = _reshape_dummy_dim(var, [0]) if gamma is None: gamma", "min_value if max_value is None: max_value = np.inf if min_value", "local_conv1d(inputs, kernel, kernel_size, strides, data_format=None): if data_format is None: data_format", "for dilated convolution' x = C.convolution( kernel, x, strides=dilation_rate[0], auto_padding=[", "+ strides # cntk output_shape does not include batch axis", "1)): assert len(padding) == 2 num_dynamic_axis = _get_dynamic_axis_num(x) base_shape =", "data_format == 'channels_last': x = C.swapaxes(x, 0, 1) return x", "def shape(x): shape = list(int_shape(x)) num_dynamic = _get_dynamic_axis_num(x) non_dyn_shape =", "so eval it first as # workaround if isinstance(value, C.cntk_py.Function):", "x * 0 def ones_like(x, dtype=None, name=None): return zeros_like(x) +", "is not None and not has_seq_axis(mask): if go_backwards: mask =", "1 if isinstance(axis, tuple): _axis = list(axis) elif isinstance(axis, int):", "% dims) if _get_dynamic_axis_num(inputs) == 0 or unroll: return _static_rnn(", "with epsilon clipping output = C.clip(output, epsilon(), 1.0 - epsilon())", "+ '/' + name def constant(value, dtype=None, shape=None, name=None): if", "data_format == 'channels_last': shape = list(output_shape) shape[0] = output_shape[3] shape[1]", "of this node \"\"\" def __init__(self, input, name='convert_to_batch'): super(ConvertToBatch, self).__init__([input],", "v._uses_learning_phase = False v.constraint = constraint return v def bias_add(x,", "i in range(dims)]) if num_dynamic_axis > 0 and pattern[:num_dynamic_axis] !=", "'channels_first': if num_dynamic_axis > 0: assert len(base_shape) == 3 if", "1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)): if data_format is", "numpy as np from .common import floatx, epsilon, image_dim_ordering, image_data_format", "return reshape(x, tmp_shape) def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None): if", "yield NAME_SCOPE_STACK.pop() def get_uid(prefix=''): _UID_PREFIXES[prefix] += 1 return _UID_PREFIXES[prefix] def", "list(output_shape) shape[0] = output_shape[2] shape[1] = output_shape[0] shape[2] = output_shape[1]", "C.convolution( kernel, x, strides=dilation_rate[0], auto_padding=[ False, padding, padding]) return _postprocess_conv2d_output(x,", "states = new_states[:len(states)] i += 1 i = 1 #", "result.append(eval(x)) return result def set_value(x, value): if (isinstance(x, C.variables.Parameter) or", "def less(x, y): return C.less(x, y) def less_equal(x, y): return", "x_shape = int_shape(x) y_shape = int_shape(y) if isinstance(axes, int): axes", "is_keras_tensor(x): if not is_tensor(x): raise ValueError('Unexpectedly found an instance of", "will resolve it later with cntk cond op. 
if callable(x)", "else: new_c.append(c) rnn_constants.append(new_c) else: if _get_dynamic_axis_num(constant) == 1: rnn_constants.append(C.sequence.broadcast_as(constant, rnn_inputs))", "variable(np.eye(size), dtype, name) def zeros_like(x, dtype=None, name=None): return x *", "s for s in shape] cntk_shape = tuple(cntk_shape) if dynamic_axis_num", "[] for o, p in zip(new_states, place_holders): n_s.append(o.replace_placeholders({p: o.output})) if", "keepdims=keepdims) def std(x, axis=None, keepdims=False): return C.sqrt(var(x, axis=axis, keepdims=keepdims)) def", "value.dtype != dtype and len(shape) > 0: value = value.astype(dtype)", "alpha != 0.: negative_part = C.relu(-x) x = C.relu(x) if", "str(data_format)) stride = strides[0] kernel_shape = int_shape(kernel) output_length, feature_dim, filters", "in inputs. ' 'Please double check the model and inputs", "np.prod(np.asarray(self.target_shape)) num_static_element = np.prod(np.asarray(self.from_shape)) num_old_batch = int(num_element / num_static_element) return", "'CNTK backend warning: GPU is not detected. ' 'CNTK\\'s CPU", "== 1 and len(shape) > 0 and shape[0] == -1:", "num_dynamic_axis > 0 and pattern[:num_dynamic_axis] != current_layout[:num_dynamic_axis]: raise ValueError('CNTK backend:", "1) b_any = any dev = C.device.use_default_device() if dev.type() ==", "return C.reshape(x, shape) def mean(x, axis=None, keepdims=False): axis = _normalize_axis(axis,", "beta = _reshape_dummy_dim(beta, [0]) return (x - mean) / (C.sqrt(var)", "`%s`, the second axis ' 'is not static. If you", "bias.shape[0]) else: shape = bias.shape elif dims == 2: if", "it first as # workaround if isinstance(value, C.cntk_py.Function): value =", "shape = list(int_shape(x)) nones = _get_dynamic_axis_num(x) index = axis if", "y = expand_dims(y) normalized_axis = [] normalized_axis.append(_normalize_axis(axes[0], x)[0]) normalized_axis.append(_normalize_axis(axes[1], y)[0])", "if strides != (1, 1): raise ValueError('Invalid strides for dilated", "_convert_dtype_string(dtype): if dtype == np.float32: return 'float32' elif dtype ==", "== -1: # collapse axis with batch axis if b_any(_", "1: raise ValueError('CNTK Backend: Invalid dropout level %s, ' 'must", "1) def _remove_dims(x, axis, keepdims=False): if keepdims is False and", "negative_part return x def dropout(x, level, noise_shape=None, seed=None): if level", "shape[0] = output_shape[2] shape[1] = output_shape[0] shape[2] = output_shape[1] output_shape", "update[1]) else: u = update if len(u.arguments) == 0: u_ops.append(u)", "return output elif data_format == 'channels_last': output = repeat_elements(x, depth_factor,", "stride_col, j * stride_col + kernel_size[1]) if data_format == 'channels_first':", "try: return float(version) except: warnings.warn( 'CNTK backend warning: CNTK version", "padding='valid', data_format=None, pool_mode='max'): if data_format is None: data_format = image_data_format()", "(like dropout) won't be applied during \"eval\" in cntk. 
#", "'pad'): x = C.pad(x, pattern=[(0, 0), padding, (0, 0)]) else:", "batch, filters, output_length output = sum(output, axis=3) # shape: batch,", "/ gamma may be processed by broadcast # so it", "is not in shape, # so just flatten all the", "value.astype(dtype) # TODO: remove the conversion when cntk supports int32,", "output def get_variable_shape(x): return int_shape(x) def update(x, new_x): return C.assign(x,", "return _UID_PREFIXES[prefix] def learning_phase(): # If _LEARNING_PHASE is not 0", "def conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): if data_format is", "= False v.constraint = constraint return v def bias_add(x, bias,", "dims == 4: if data_format == 'channels_first': if bias_dims ==", "len(cntk_axis): cntk_axis[i] -= nones i += 1 if isinstance(axis, tuple):", "is not supported with ' 'variable-length sequences. Please specify a", "def sparse_categorical_crossentropy(target, output, from_logits=False): target = C.one_hot(target, output.shape[-1]) target =", "raise ValueError('CNTK Backend: tensor with keras shape: `%s` has '", "> 2: self.metrics_outputs = [f.output for f in outputs[2:]] self.metrics_func", "the model and inputs.' % argument.name) # Some ops (like", "1), (1, 1), (1, 1)), data_format=None): assert len(padding) == 3", "conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): if data_format is None:", "when constructing trainer, ' 'found gradient node `%s` which is", "len(bias.shape) if bias_dims != 1 and bias_dims != dims: raise", "raise NotImplementedError i = num_dynamic_axis for i, rep in enumerate(n):", "level, noise_shape=None, seed=None): if level < 0. or level >=", "return eval(x) def batch_get_value(xs): result = [] for x in", "put filters first weight = permute_dimensions(kernel, (2, 0, 1)) #", "== 0: return C.reduce_sum(any_matrix) else: return any_matrix def all(x, axis=None,", "width_factor, data_format): if data_format == 'channels_first': output = repeat_elements(x, height_factor,", "_axes = tuple(axes) if shift is None: shift = x", "during training. global grad_parameter_dict if isinstance(variables, list) is False: variables", "dimension ' 'instead of `None`.') # how to apply mean", "final_states] if need_convert: final_output = C.sequence.unpack(final_output, 0, no_mask_output=True) if num_time_step", "None else _ for _ in new_shape]) result = C.reshape(x,", "shape: batch, filters, row, col output = reshape(output, (-1, filters,", "the tensor. dtype: Tensor type. name: Optional name string for", "keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_min')", "tuple): _axis = list(axis) elif isinstance(axis, int): _axis = [axis]", "'expected 0 or 1.' % value) _LEARNING_PHASE = value def", "_ >= 0 else _ + len(shape)) if len(_axis) ==", "= output_shape kernel_shape = int_shape(kernel) _, feature_dim, filters = kernel_shape", "[f.output for f in outputs[2:]] self.metrics_func = C.combine(self.metrics_outputs) else: self.metrics_func", "node ' 'is constructed.' % g) if len(u_list) > 0:", "super(ConvertToBatch, self).__init__([input], as_numpy=False, name=name) def infer_outputs(self): batch_axis = C.Axis.default_batch_axis() return", "nones: i = 0 while dynamic_axis_index < nones: cntk_axis[i] =", "def variable(value, dtype=None, name=None, constraint=None): \"\"\"Instantiates a variable and returns", "+ 1) # remove dummy dimension current = squeeze(current, 1)", "inputs.' 
% argument.name) self.unrelated_updates.eval(input_dict, as_numpy=False) return updated def function(inputs, outputs,", "else dtype v = C.parameter(shape=shape, init=value, dtype=dtype, name=_prepare_name(name, 'variable')) v._keras_shape", "hot fix, ignore all the . except the first one.", "None: dtype = floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.ones(shape, ctype),", "len(outputs) > 1 else ( outputs[0], ) self.trainer = C.trainer.Trainer(", "dtype == np.float64: return 'float64' else: raise ValueError('CNTK Backend: Unsupported", "{'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) dims =", "' 'collapse of batch axis with inferred dimension. ' 'The", "it reduce_axes = [] for a in axis: if isinstance(a,", "C.ops.functions.Function)) def shape(x): shape = list(int_shape(x)) num_dynamic = _get_dynamic_axis_num(x) non_dyn_shape", "return False return True def __call__(self, inputs): global _LEARNING_PHASE_PLACEHOLDER global", "name def constant(value, dtype=None, shape=None, name=None): if dtype is None:", "data_format) def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max'):", "def relu(x, alpha=0., max_value=None): if alpha != 0.: negative_part =", "= [axis] elif isinstance(axis, list): _axis = list(axis) else: _axis", "= slice(i * stride, i * stride + kernel_size[0]) xs.append(reshape(inputs[:,", "if _get_dynamic_axis_num(inputs) == 0 or unroll: return _static_rnn( step_function, inputs,", "C.reduce_sum(output, axis=-1) # avoid numerical instability with epsilon clipping output", "variable(value, dtype=None, name=None, constraint=None): \"\"\"Instantiates a variable and returns it.", "strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)): if data_format", "False # CNTK currently don't support cond op, so here", "current_layout = [i for i in range(dims)] else: current_layout =", "filters, output_row, output_col)) if data_format == 'channels_last': # shape: batch,", "transpose kernel to output_filters first, to apply broadcast weight =", "NotImplementedError def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1,", "0: warnings.warn( 'CNTK backend warning: GPU is not detected. '", "# Arguments x: A candidate placeholder. # Returns Boolean. 
\"\"\"", "a constant placeholder, here use this global # map to", "by default if shape[1] is None: raise ValueError('CNTK Backend: the", "[] if self.trainer is not None: input_dict = {} for", "C.convolution_transpose( kernel, x, strides, auto_padding=[ False, padding, padding], output_shape=output_shape) return", "return C.tanh(x) def _static_rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False,", "data_format) depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format) depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0,", "isinstance(n, int): n = (n,) elif isinstance(n, list): n =", "beta is None: gamma = ones_like(x) else: gamma = ones_like(beta)", "+ name def constant(value, dtype=None, shape=None, name=None): if dtype is", "in self.loss.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else:", "if _ is None: raise ValueError('CNTK Backend: randomness op with", ">= 2.2 else C.InferredDimension cntk_shape = [dynamic_dimension if s is", "back output_slice = expand_dims(outputs[i], 1) final_output = C.splice(final_output, output_slice, axis=time_axis)", "axis, reduce_fun_name): if isinstance(axis, list): for a in axis: if", "def _preprocess_conv2d_kernel(kernel, data_format): # As of Keras 2.0.0, all kernels", "outputs.append(output) states = new_states i -= 1 else: i =", "== (1, 1): strides = (1,) + strides x =", "2: self.metrics_outputs = [f.output for f in outputs[2:]] self.metrics_func =", "= [0 for _ in cntk_axes] strides = [-1 for", "placeholder. # Returns Boolean. \"\"\" return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder", "len(x.shape) - 1: x = C.swapaxes(x, i, i + 1)", "ValueError('CNTK Backend: tensor with keras shape: `%s` has ' '%d", "if self.when(argument): self.execute(argument) return None, argument def backward(self, state, root_gradients):", "= int_shape(kernel) _, feature_dim, filters = kernel_shape xs = []", "bias_add(x, bias, data_format=None): if data_format is None: data_format = image_data_format()", "def is_placeholder(x): \"\"\"Returns whether `x` is a placeholder. # Arguments", "-= 1 result = C.times(x, y, output_rank=(len(y.shape) - 1) if", "padding, 0) else: assert len(base_shape) == 3 if hasattr(C, 'pad'):", "strides = [-1 for _ in cntk_axes] return C.slice(x, cntk_axes,", "return output def get_variable_shape(x): return int_shape(x) def update(x, new_x): return", "= cntk_axis[_axis[i]] else: if _axis is None: _axis = C.Axis.all_axes()", "static shape.' % (str(tensor.shape), str(value.shape))) feed_dict[tensor] = value updated =", "instance.') return hasattr(x, '_keras_history') def is_tensor(x): return isinstance(x, (C.variables.Constant, C.variables.Variable,", "elif len(u_ops) > 0: unrelated_updates.extend(u_ops) if len(unrelated_updates) > 0: self.unrelated_updates", "not found in inputs. ' 'Please double check the model", "_preprocess_conv2d_kernel(depthwise_kernel, data_format) depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)), (-1,", "None else s for s in shape] cntk_shape = tuple(cntk_shape)", "= 0 while i < shape[1]: current = C.ops.slice(inputs, time_axis,", "' 'CNTK only supports float32 and ' 'float64.' 
% dtype)", "we will create gradient as a constant placeholder, here use", "shape[1] if num_time_step is None and not has_seq_axis(inputs): num_time_step =", "inputs, outputs, updates=[], **kwargs): self.placeholders = inputs self.trainer = None", "is False: variables = [variables] grads = [] for v", "x = _padding(x, padding[1], 3) x = _padding(x, padding[2], 4)", "workaround if isinstance(value, C.cntk_py.Function): value = eval(value) shape = value.shape", "return result else: for index in sorted(_axis, reverse=True): del shape[index]", "_moments(x, _normalize_axis(reduction_axes, x)) if sorted(reduction_axes) == list(range(ndim(x)))[:-1]: normalized = batch_normalization(", "identity(x, name=None): if name is None: name = '%s_alias' %", "if getattr(output, '_uses_learning_phase', False): uses_learning_phase = True if mask is", "different format if data_format == 'channels_last': shape = list(output_shape) shape[0]", "support it in next release. if not self._is_input_shape_compatible(value, tensor): raise", "tensor instance.') return hasattr(x, '_keras_history') def is_tensor(x): return isinstance(x, (C.variables.Constant,", "= [variables] grads = [] for v in variables: g", "def squeeze(x, axis): if isinstance(axis, tuple): axis = list(axis) if", "data_format == 'channels_last': output = repeat_elements(x, height_factor, axis=1) output =", "uses_learning_phase uses_learning_phase = True if m is not None: new_states", "kernel, x, strides=dilation_rate[0], auto_padding=[ False, padding, padding]) return _postprocess_conv2d_output(x, data_format)", "kernel def _postprocess_conv3d_output(x, dim_ordering): if dim_ordering == 'channels_last': x =", "= [x] * n return C.splice(*temp, axis=index) def tanh(x): return", "'fixed dimension instead of `None`.') return np.prod(int_shape(x)) def cast(x, dtype):", "go_backwards=False, mask=None, constants=None, unroll=False, input_length=None): shape = int_shape(inputs) dims =", "+ str(axes)) if len(x_shape) == 2 and len(y_shape) == 2:", "% base_shape) if pattern[0] > 0: prefix_shape = list(base_shape) prefix_shape[axis]", "if len(unrelated_updates) > 0: self.unrelated_updates = C.combine([_.output for _ in", "the variable after an optimizer update. 
# Returns A variable", "C.less(x, y) def less_equal(x, y): return C.less_equal(x, y) def maximum(x,", "np.random.seed(seed) if dtype is None: dtype = np.float32 else: dtype", "same x to take cntk broadcast feature # to make", "ValueError('CNTK Backend: padding input tensor with ' 'shape `%s` contains", "1) return result else: for index in sorted(_axis, reverse=True): del", "output_shape[1] shape[3] = output_shape[2] output_shape = tuple(shape) x = C.convolution_transpose(", "return C.reshape(x, shape) else: num_dynamic_axis = _get_dynamic_axis_num(x) if num_dynamic_axis ==", "= _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) return", "C.swapaxes(x, 0, 1) kernel = C.swapaxes(kernel, 0, 2) padding =", "data_format ' + str(data_format)) stride = strides[0] kernel_shape = int_shape(kernel)", "and _get_cntk_version() >= 2.2: const_a = C.unpack_batch(x) const_a = C.reshape(const_a,", "do unroll by default if shape[1] is None: raise ValueError('CNTK", "None: non_dyn_shape.append(x.shape[i]) else: non_dyn_shape.append(shape[i + num_dynamic]) return shape[:num_dynamic] + non_dyn_shape", "== 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1)", "- 1: mask = expand_dims(mask) nones = _get_dynamic_axis_num(inputs) states =", "resize_images(x, height_factor, width_factor, data_format): if data_format == 'channels_first': output =", "from_logits: result = C.cross_entropy_with_softmax(output, target) # cntk's result shape is", "= C.reshape(beta, target_shape) normalized = batch_normalization( x, broadcast_mean, broadcast_var, broadcast_beta,", "final_output = expand_dims(outputs[0], 1) last_output = outputs[0] while i <", "tuple(cntk_shape) if dynamic_axis_num > len(cntk_shape): raise ValueError('CNTK backend: creating placeholder", "new_shape = [C.InferredDimension if _ is None else _ for", "' '`Function`, `Constant` or ' '`Parameter`.' % type(x)) def placeholder(", "%d ' 'Need at least rank 3 to run RNN.'", "x to take cntk broadcast feature # to make the", "on it reduce_axes = [] for a in axis: if", "num_element = arguments.shape()[0] * np.prod(np.asarray(self.from_shape)) num_static_element = np.prod(np.asarray(self.target_shape)) num_batch =", "_get_dynamic_axis_num(x) base_shape = x.shape if data_format == 'channels_first': if num_dynamic_axis", "(1, 1)), data_format=None): assert len(padding) == 2 assert len(padding[0]) ==", "output = sum(output, axis=3) # shape: batch, filters, row, col", "= [] normalized_axis.append(_normalize_axis(axes[0], x)[0]) normalized_axis.append(_normalize_axis(axes[1], y)[0]) # transpose i =", "strides, output_shape, data_format=None): if data_format is None: data_format = image_data_format()", "self.when(argument): self.execute(argument) return None, argument def backward(self, state, root_gradients): return", "= floatx() for _ in shape: if _ is None:", "# They only evaluated in training phase. 
To make it", "metrics more # than 2, need manual eval elif len(outputs)", "return C.stop_gradient(variables) def switch(condition, then_expression, else_expression): ndim_cond = ndim(condition) ndim_expr", "None: dtype = floatx() return variable(np.eye(size), dtype, name) def zeros_like(x,", "[C.placeholder(dynamic_axes=x.dynamic_axes) for _ in states] past_values = [] for s,", "data_format) depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)), (-1, 1)", "= strides[0] kernel_shape = int_shape(kernel) output_length, feature_dim, filters = kernel_shape", "Will remove this tricky after we add support # in", "_preprocess_border_mode(padding) strides = strides + (strides[0],) x = C.convolution( kernel,", "high, dtype=None, name=None, seed=None): if dtype is None: dtype =", "double ' 'check the model and inputs.' % argument.name) self.unrelated_updates.eval(input_dict,", "workaround. It may have # perf issue, will resolve it", "list(padding[1]), list(padding[2]), [0, 0]]) else: x = _padding(x, padding[0], 0)", "False return True def __call__(self, inputs): global _LEARNING_PHASE_PLACEHOLDER global _LEARNING_PHASE", "if axes[0] == 1 else transpose(result) else: return sum(x *", "i >= num_dynamic_axis and shape[i] is not None: tmp =", "C.convolution(depthwise_kernel, x, strides=strides, auto_padding=[False, padding, padding], groups=x.shape[0]) else: if dilation_rate[0]", "= _reshape_sequence(final_output, num_time_step) f_stats = [] for l_s, i_s in", "and _get_dynamic_axis_num(x) == 0: return C.reduce_sum(all_matrix) else: return all_matrix def", "eval(x): if isinstance(x, C.cntk_py.Function): return x.eval() elif isinstance(x, C.variables.Constant) or", "gradients as symbolic op, # to hook up with keras", "= [-1 for _ in cntk_axes] return C.slice(x, cntk_axes, begin_index,", "= True if mask is not None: mask_slice = C.ops.slice(mask,", "padding, padding]) else: assert dilation_rate[0] == dilation_rate[1] assert strides ==", "argument, device=None, outputs_to_retain=None): if self.when(argument): self.execute(argument) return None, argument def", "in new_shape]) return C.reshape(x, new_shape) def tile(x, n): if isinstance(n,", "been resolved ' 'to shape `%s`, but input shape is", "self).__init__([input], as_numpy=False, name=name) def infer_outputs(self): batch_axis = C.Axis.default_batch_axis() return [", "if isinstance(n, int): n = (n,) elif isinstance(n, list): n", "Shape: (batch, output_length, filters) return permute_dimensions(output, (0, 2, 1)) def", "shape = tuple([-1 for _ in range(num_dynamic_axis - i)]) +", "padding = 'valid' if data_format == 'channels_last': x = C.swapaxes(x,", "== 1.0 or _LEARNING_PHASE == 1)): _, output_values = self.metrics_func.forward(", "# sequence axis is removed by default, so don't need", "res = C.elu(x) if alpha == 1: return res else:", "num_dynamic_axis >= len(shape): i = 0 while i < len(shape):", "if dilation_rate[0] != dilation_rate[1]: raise ValueError('CNTK Backend: non-square dilation_rate is", "else: assert len(base_shape) == 4 if hasattr(C, 'pad'): x =", "candidate placeholder. # Returns Boolean. 
\"\"\" return hasattr(x, '_cntk_placeholder') and", "is not None: _axis[i] = cntk_axis[_axis[i]] else: if _axis is", "if isinstance(variables, (list, tuple)): return map(C.stop_gradient, variables) else: return C.stop_gradient(variables)", "random_normal_variable(shape=shape, mean=mean, scale=1.0, seed=seed) def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):", "if value not in {0, 1}: raise ValueError('CNTK Backend: Set", "non-static axis, please try ' 'dynamic rnn with sequence axis.'", "version[1] == '.': version = version[:2] + version[2:].replace('.', '') try:", "mode (learning_phase == 1) or test mode (learning_phase == 0).", "axis: if isinstance(a, C.Axis) is False: reduce_axes.append(a) return _reshape_dummy_dim(x, reduce_axes)", "1 bias_dims = len(bias.shape) if bias_dims != 1 and bias_dims", "for i in range(dims)]) if num_dynamic_axis > 0 and pattern[:num_dynamic_axis]", "_ == C.FreeDimension else _ for _ in new_shape]) return", "if beta is None: beta = zeros_like(mean) elif ndim(beta) ==", "cntk calculate everything in float, so don't need case from", "run with GPU to get better performance.') # A learning", "may cause crash. # We have made a fix but", "`%s`. Currently ' 'CNTK can not take variable length inputs.", "_axis = list(axis) else: _axis = axis if isinstance(_axis, list):", "conv_dim2, conv_dim3, # input_depth) x = C.transpose(x, (3, 0, 1,", "= version[:2] + version[2:].replace('.', '') try: return float(version) except: warnings.warn(", "# on the format `(rows, cols, input_depth, depth)`, # independently", "batch axis output_shape = output_shape[1:] # in keras2, need handle", "keepdims=keepdims) any_matrix = C.element_select( reduce_result, ones_like(reduce_result), zeros_like(reduce_result)) if len(reduce_result.shape) ==", "axis=-1) def sparse_categorical_crossentropy(target, output, from_logits=False): target = C.one_hot(target, output.shape[-1]) target", "cntk supports int32, int64 # https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter dtype = 'float32' if", "infer_outputs(self): return [ C.output_variable( self.target_shape, self.inputs[0].dtype, [])] def forward(self, arguments,", "is not None: input_dict = {} for argument in self.metrics_func.arguments:", "mean, variant, beta, gamma, epsilon) else: # need broadcasting target_shape", "def _postprocess_conv2d_output(x, data_format): if data_format == 'channels_last': x = C.transpose(x,", "= list(int_shape(x)) nones = _get_dynamic_axis_num(x) index = axis if axis", "is None else _ for _ in new_shape]) result =", "ValueError( 'CNTK backend: assign ops argument %s ' 'is not", "value.dtype != np.float64): value = value.astype(np.float32) if tensor == _LEARNING_PHASE_PLACEHOLDER:", "list): current_layout = [i for i in range(dims)] else: current_layout", "input_length * kernel_size) output = x_aggregate * weight # Shape:", "probas of each sample sum to 1 output /= C.reduce_sum(output,", "x.value = value else: raise NotImplementedError def stop_gradient(variables): if isinstance(variables,", "with C.default_options(axis_offset=1): def _recurrence(x, states, m): # create place holder", "+= 1 i = 1 # add the time_step axis", "output_rank=len(reference.shape) - 1) def _remove_dims(x, axis, keepdims=False): if keepdims is", "_LEARNING_PHASE is not 0 or 1, return dynamic learning phase", "any_matrix = C.element_select( reduce_result, ones_like(reduce_result), zeros_like(reduce_result)) if len(reduce_result.shape) == 0", "== 1 else transpose(result) else: return sum(x * transpose(y), 
axis=axes[0],", "output /= C.reduce_sum(output, axis=-1) # avoid numerical instability with epsilon", "reference.shape[0] one_hot_matrix = C.ops.one_hot(indices, num_classes) return C.times(one_hot_matrix, reference, output_rank=len(reference.shape) -", "if hasattr(x, '_keras_shape'): return x._keras_shape shape = x.shape if hasattr(x,", "axis: gamma = C.reduce_mean(gamma, axis - 1) beta = C.reduce_mean(beta,", "gradient node ' 'is constructed.' % g) if len(u_list) >", "'dynamic_axes'): return len(x.dynamic_axes) else: return 0 def _contain_seqence_axis(x): if _get_dynamic_axis_num(x)", "result = C.cross_entropy_with_softmax(output, target) # cntk's result shape is (batch,", "= '%s_alias' % x.name return C.alias(x, name=name) def _preprocess_conv2d_input(x, data_format):", "= [] for i in range(len(x.shape)): if shape[i + num_dynamic]", "_postprocess_conv2d_output(x, data_format) def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1,", "transpose kernel to put filters first weight = permute_dimensions(kernel, (2,", "in zip(input_shape, placeholder_shape): if i != p and p !=", "_ in new_shape]) return C.reshape(x, new_shape) def tile(x, n): if", "for proper broadcasting. for axis in _axes: shift = C.reduce_mean(shift,", "They only evaluated in training phase. To make it work,", "the format `(rows, cols, input_depth, depth)`, # independently of `data_format`.", "axis=1) output = repeat_elements(output, width_factor, axis=2) return output else: raise", "u_list, update_func) criterion = ( outputs[0], outputs[1]) if len(outputs) >", "if need_convert: if go_backwards: rnn_inputs = reverse(rnn_inputs, 1) rnn_inputs =", "< shape[axis]: tmp = C.ops.slice(x, axis, i, i + 1)", "if nones > ndim: raise ValueError('CNTK Backend: tensor with keras", "axis for axis in range(1, ndim(x)): if axis in reduction_axes:", "in zip(self.placeholders, inputs): # cntk only support calculate on float,", "1) else: shape = (bias.shape[3],) + bias.shape[:3] elif data_format ==", "it in next release. 
if not self._is_input_shape_compatible(value, tensor): raise ValueError('CNTK", "uses_learning_phase = False if dims < 3: raise ValueError('Input should", "= [C.InferredDimension if _ is None else _ for _", "False: variables = [variables] grads = [] for v in", "name=name) def _preprocess_conv2d_input(x, data_format): if data_format == 'channels_last': # TF", "_padding(x, pattern, axis): base_shape = x.shape if b_any([dim < 0", "# transpose kernel to output_filters first, to apply broadcast weight", "= C.reshape(target, output.shape) return categorical_crossentropy(target, output, from_logits) class Function(object): def", "in _axes: variance_mean = C.reduce_mean(variance_mean, axis=axis) variance = C.minus(variance_mean, C.square(shifted_mean))", "None: gamma = ones_like(x) else: gamma = ones_like(beta) if beta", "global # map to keep the mapping from grad placeholder", "== 1: beta = _reshape_dummy_dim(beta, [0]) return (x - mean)", "axis = [axis] shape = list(int_shape(x)) _axis = [] for", "C.times(x, y, output_rank=(len(y.shape) - 1) if len(y.shape) > 1 else", "= (1,) + strides # cntk output_shape does not include", "return np.float32 def _convert_dtype_string(dtype): if dtype == np.float32: return 'float32'", "0: new_output = n_s[0] return new_output, n_s final_output, final_states =", "-np.inf return C.clip(x, min_value, max_value) def binary_crossentropy(target, output, from_logits=False): if", "dim_ordering): kernel = C.transpose(kernel, (4, 3, 0, 1, 2)) return", "' 'Please double check how the gradient node ' 'is", "def sqrt(x): return C.sqrt(x) def exp(x): return C.exp(x) def log(x):", "i, a in enumerate(_axis): if a is not None and", "'constant')) const._keras_shape = const.shape const._uses_learning_phase = False return const def", "= C.to_sequence_like(mask, rnn_inputs) states = tuple(initial) with C.default_options(axis_offset=1): def _recurrence(x,", "if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1])])", "list(padding[0]), list(padding[1]), list(padding[2])]) else: x = _padding(x, padding[0], 2) x", "raise ValueError('CNTK backend: the permute pattern %s ' 'requested permute", "= int_shape(x) ndim = len(shape) nones = _get_dynamic_axis_num(x) if nones", "result def transpose(x): return C.swapaxes(x, 0, 1) def gather(reference, indices):", "Please provide ' 'fixed dimension instead of `None`.') return np.prod(int_shape(x))", "'=' + str(ndim_expr)) elif ndim_cond < ndim_expr: shape_expr = int_shape(then_expression)", "for o, p in zip(new_states, place_holders): n_s.append(o.replace_placeholders({p: o.output})) if len(n_s)", "* stride_row, i * stride_row + kernel_size[0]) slice_col = slice(j", "max_value = min_value if max_value is None: max_value = np.inf", "_get_dynamic_axis_num(constant) == 1: rnn_constants.append(C.sequence.broadcast_as(constant, rnn_inputs)) else: rnn_constants.append(constant) else: rnn_constants =", "cntk_axes] return C.slice(x, cntk_axes, begin_index, end_index, strides) def _reshape_batch(x, shape):", "in self.trainer_output: updated.append(outputs[o]) if self.metrics_func is not None: input_dict =", "int_shape(x) return len(shape) def _prepare_name(name, default): prefix = '_'.join(NAME_SCOPE_STACK) if", "+ str(data_format)) num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if data_format", "'channels_last': x = C.swapaxes(x, 0, 1) kernel = C.swapaxes(kernel, 0,", "[len(y_shape) - 1] y = C.transpose(y, perm=permutation) return C.times(x, y,", "1 dynamic_axis_index += 1 while i < len(cntk_axis): 
cntk_axis[i] -=", "type `' + str(type(x)) + '`. ' 'Expected a symbolic", "0 or 1, we will go with dynamic learning phase", "to run ' 'rnn with non-static axis, please try '", "return C.to_batch(const_a) else: return C.user_function(ReshapeBatch(x, shape[1:])) def _get_cntk_version(): version =", "' 'check the model and inputs.' % argument.name) self.unrelated_updates.eval(input_dict, as_numpy=False)", "= list(int_shape(x)) tmp_shape[1] = time_step return reshape(x, tmp_shape) def local_conv1d(inputs,", "output = C.clip(output, epsilon(), 1.0 - epsilon()) return -sum(target *", "'is not static. If you want to run ' 'rnn", "or training is True else alt else: result = C.element_select(training,", "def equal(x, y): return C.equal(x, y) def not_equal(x, y): return", "= _padding(x, padding[2], 2) else: assert len(base_shape) == 5 if", "return C.splice(*slices, axis=axis) def repeat(x, n): # this is a", "# to make the recurrent layer work. # need to", "this node \"\"\" def __init__(self, input, name='convert_to_batch'): super(ConvertToBatch, self).__init__([input], as_numpy=False,", "C.exp(x) def log(x): return C.log(x) def round(x): return C.round(x) def", "i, i + 1) i += 1 i = normalized_axis[1]", "def _recurrence(x, states, m): # create place holder place_holders =", "max_value) def binary_crossentropy(target, output, from_logits=False): if from_logits: output = C.sigmoid(output)", "not support shape like (1, batch). so using the workaround", "= _preprocess_conv2d_input(x, data_format) kernel = _preprocess_conv2d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding)", "bias_dims == 1: shape = (1, 1, 1, bias.shape[0]) else:", "tensor which has batch axis batch_size: size of batch axis.", "_ == C.InferredDimension or _ == C.FreeDimension: raise ValueError('CNTK backend:", "value updated = [] if self.trainer is not None: input_dict", "= [] for o, p in zip(new_states, place_holders): n_s.append(o.replace_placeholders({p: o.output}))", "def tanh(x): return C.tanh(x) def _static_rnn(step_function, inputs, initial_states, go_backwards=False, mask=None,", "C.clip(x, 0.0, max_value) if alpha != 0.: x -= alpha", "' + str(data_format)) num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if", "[0, 1].' % level) return C.dropout(x, level) def batch_flatten(x): #", "if mask is not None: mask_shape = int_shape(mask) if len(mask_shape)", "x def one_hot(indices, num_classes): return C.one_hot(indices, num_classes) def get_value(x): if", "has_seq_axis(x): return hasattr(x, 'dynamic_axes') and len(x.dynamic_axes) > 1 def l2_normalize(x,", "output = _reduce_on_axis(x, axis, 'reduce_max') return _remove_dims(output, axis, keepdims) def", "one. if len(version) > 2 and version[1] == '.': version", "shape: (samples, input_depth, rows, cols) # TF input shape: (samples,", "'to_batch'): initial.append(C.to_batch(s)) else: initial.append(C.user_function(ConvertToBatch(s))) else: initial.append(s) need_convert = not has_seq_axis(inputs)", "output_shape[2] shape[1] = output_shape[0] shape[2] = output_shape[1] output_shape = tuple(shape)", "None: mask_slice = C.ops.slice(mask, time_axis, i, i + 1) mask_slice", "x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis) return x def spatial_2d_padding(x,", "# in cntk, need to remove those dummy axis. 
if", "value, C.variables.Constant) or isinstance( value, C.variables.Parameter): value = value.value #", "auto_padding=[ False, padding, padding, padding]) return _postprocess_conv3d_output(x, data_format) def conv3d_transpose(x,", "(high - low) / 2 p = C.parameter( shape, init=C.initializer.uniform(", "C.swapaxes(y, i, i - 1) i -= 1 result =", "grads = [] for v in variables: g = C.constant(0,", "only could handle loss and 1 metric in trainer, for", "strides, auto_padding=[ False, padding, padding]) else: assert dilation_rate[0] == dilation_rate[1]", "int_shape(y) if len(y_shape) > 2: permutation = [len(y_shape) - 2]", "= [axis] shape = list(int_shape(x)) _axis = [] for _", "= C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2])]) else: x =", "name=_prepare_name(name, 'variable')) v._keras_shape = v.shape v._uses_learning_phase = False v.constraint =", "dtype=_convert_string_dtype(dtype), is_sparse=sparse, name=name) x._keras_shape = shape x._uses_learning_phase = False x._cntk_placeholder", "shape[i] is not None: tmp = [x] * rep x", "rnn_inputs = inputs if need_convert: if go_backwards: rnn_inputs = reverse(rnn_inputs,", "argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError('CNTK backend:", "output_shape does not include batch axis output_shape = output_shape[1:] #", "gradient as symbolic op, to hook up with keras model,", "criterion = ( outputs[0], outputs[1]) if len(outputs) > 1 else", "for i in range(len(x.shape)): if shape[i + num_dynamic] is None:", "= _reshape_dummy_dim(var, [0]) if gamma is None: gamma = ones_like(var)", "return output def reverse(x, axes): if isinstance(axes, int): axes =", "def categorical_crossentropy(target, output, from_logits=False): if from_logits: result = C.cross_entropy_with_softmax(output, target)", "> 0: assert len(outputs) > 0 self.loss = outputs[0] #", "shape is (batch, 1), while keras expect (batch, ) return", "squeeze(mask_slice, time_axis) if len(outputs) == 0: prev_output = zeros_like(output) else:", "grads: if g in grad_parameter_dict: p_list.append(grad_parameter_dict[g]) u_list.append(g) else: raise ValueError(", "[] for s in initial_states: if _get_dynamic_axis_num(s) == 0: if", "x = C.convolution_transpose( kernel, x, strides, auto_padding=[ False, padding, padding,", "variable # length. Will support it in next release. 
if", "padding[1], 2) else: assert len(base_shape) == 4 if hasattr(C, 'pad'):", "class LambdaFunc(C.ops.functions.UserFunction): def __init__(self, arg, when=lambda arg: True, execute=lambda arg:", "depthwise_kernel.shape[2:]) pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format) padding = _preprocess_border_mode(padding) if dilation_rate", "raise ValueError('Invalid pooling mode: ' + str(pool_mode)) return _postprocess_conv3d_output(x, data_format)", "= permute_dimensions(kernel, (2, 0, 1)) # Shape: (batch, filters, output_length,", "dtype = floatx() return variable(np.eye(size), dtype, name) def zeros_like(x, dtype=None,", "' '%d cntk dynamic axis, this is not expected, please", "C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)), (-1, 1) + depthwise_kernel.shape[2:]) padding", "CNTK 2.0 GA as default.') return float(2.0) class ReshapeBatch(C.ops.functions.UserFunction): def", "np.float64: return 'float64' else: raise ValueError('CNTK Backend: Unsupported dtype: %s.", "expand_dims(x, nones) return x def max(x, axis=None, keepdims=False): axis =", "input_length=None): shape = int_shape(inputs) dims = len(shape) uses_learning_phase = False", "uses_learning_phase return result def in_test_phase(x, alt, training=None): return in_train_phase(alt, x,", "> len(cntk_shape): raise ValueError('CNTK backend: creating placeholder with ' '%d", "= repeat_elements(output, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return", "gradient during training. global grad_parameter_dict if isinstance(variables, list) is False:", "dummy dimension current = squeeze(current, time_axis) output, new_states = step_function(", "(1 + C.abs(x)) def categorical_crossentropy(target, output, from_logits=False): if from_logits: result", "output = repeat_elements(output, height_factor, axis=3) output = repeat_elements(output, width_factor, axis=4)", "constants: if isinstance(constant, list): new_c = [] for c in", "+ value * (1. - momentum)) def update_add(x, increment): result", "axis. Will remove this tricky after we add support #", "str(data_format)) x = _preprocess_conv2d_input(x, data_format) kernel = _preprocess_conv2d_kernel(kernel, data_format) padding", "if hasattr(C, 'to_batch'): initial.append(C.to_batch(s)) else: initial.append(C.user_function(ConvertToBatch(s))) else: initial.append(s) need_convert =", "dtype = floatx() if seed is None: # ensure that", "s)) new_states = return_states outputs.append(output) states = new_states[:len(states)] i +=", "than 2, need manual eval elif len(outputs) > 2: self.metrics_outputs", "Keras models in # either train mode (learning_phase == 1)", "= np.asarray(1.0) def in_train_phase(x, alt, training=None): global _LEARNING_PHASE if training", "contextlib import contextmanager import warnings C.set_global_option('align_axis', 1) b_any = any", "- (1.0 - target) * C.log(1.0 - output) return output", "%s is not supported, ' 'expected 0 or 1.' %", "cast(x, dtype): # cntk calculate everything in float, so don't", "= C.transpose(x, (1, 2, 0)) return x def _preprocess_conv3d_input(x, data_format):", "shape = list(output_shape) shape[0] = output_shape[2] shape[1] = output_shape[0] shape[2]", "with ' '`Function`, `Constant` or ' '`Parameter`.' 
% type(x)) def", "= list(output_shape) shape[0] = output_shape[3] shape[1] = output_shape[0] shape[2] =", "nones = _get_dynamic_axis_num(x) for _ in sorted(_axis, reverse=True): del shape[_]", "'.': version = version[:2] + version[2:].replace('.', '') try: return float(version)", "batch, filters, row, col output = reshape(output, (-1, filters, output_row,", "else: return sum(x * transpose(y), axis=axes[0], keepdims=True) else: if len(y_shape)", "padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is None: data_format =", "permute_dimensions(output, (0, 2, 1)) def local_conv2d(inputs, kernel, kernel_size, strides, output_shape,", "truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): if seed is None: seed", "axis) def square(x): return C.square(x) def abs(x): return C.abs(x) def", "+ str(data_format)) x = _preprocess_conv3d_input(x, data_format) kernel = _preprocess_conv3d_kernel(kernel, data_format)", "get_variable_shape(x): return int_shape(x) def update(x, new_x): return C.assign(x, new_x) def", "= True break if has_seq: nones = _get_dynamic_axis_num(x) x =", "0.0, 1.0) return x def conv1d(x, kernel, strides=1, padding='valid', data_format=None,", "if isinstance(value, np.ndarray) is False: value = np.asarray(value) if isinstance(x,", "= const.shape const._uses_learning_phase = False return const def random_binomial(shape, p=0.0,", "first axis to CNTK batch axis. We may introduce this", "= True return x def is_placeholder(x): \"\"\"Returns whether `x` is", "is None: constants = [] if mask is not None:", "case from bool / int return x def dot(x, y):", "axis in _axes: shift = C.reduce_mean(shift, axis=axis) shift = C.stop_gradient(shift)", "x = C.convolution( kernel, x, strides=dilation_rate[0], auto_padding=[ False, padding, padding])", "- m) return mean(devs_squared, axis=axis, keepdims=keepdims) def std(x, axis=None, keepdims=False):", "transpose(x): return C.swapaxes(x, 0, 1) def gather(reference, indices): # There", "else: raise ValueError('CNTK Backend: Invalid data_format:', data_format) def repeat_elements(x, rep,", "is None or name == '': return prefix + '/'", "return const def random_binomial(shape, p=0.0, dtype=None, seed=None): # use numpy", "time_step): tmp_shape = list(int_shape(x)) tmp_shape[1] = time_step return reshape(x, tmp_shape)", "if len(n) < len(shape): n = tuple([1 for _ in", "in GA. 
if n is C.InferredDimension or n is C.FreeDimension:", "= C.reshape(result, shape=(), begin_axis=index, end_axis=index + 1) return result else:", "output_row, output_col)) if data_format == 'channels_last': # shape: batch, row,", "init=C.initializer.truncated_normal( stddev, seed=seed), dtype=dtype) def dtype(x): return _convert_dtype_string(x.dtype) def zeros(shape,", "C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis) base_shape = x.shape if pattern[1] >", "(None, dim) return x def softmax(x, axis=-1): return C.softmax(x, axis=axis)", "C.FreeDimension else _ for _ in new_shape]) return C.reshape(x, new_shape)", "index < 0 or index > 1: raise NotImplementedError new_shape", "dynamic_axis_index < nones: i = 0 while dynamic_axis_index < nones:", "i < len(shape): if shape[i] is None or shape[i] ==", "+ str(data_format)) padding = _preprocess_border_mode(padding) x = _preprocess_conv3d_input(x, data_format) if", "global _LEARNING_PHASE if value not in {0, 1}: raise ValueError('CNTK", "% (str(tensor.shape), str(value.shape))) feed_dict[tensor] = value updated = [] if", "won't be executed under this mode, that's why # we", "return in_train_phase(alt, x, training=training) def _convert_string_dtype(dtype): # cntk only support", "if has_seq: nones = _get_dynamic_axis_num(x) x = expand_dims(x, nones) return", "for x in xs: if (isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)):", "step_function, inputs, initial_states, go_backwards, mask, constants, unroll, input_length) if constants", "placeholder to parameter grad_parameter_dict = {} NAME_SCOPE_STACK = [] @contextmanager", "0]]) else: x = _padding(x, padding[0], 0) x = _padding(x,", "of Keras 2.0.0, all kernels are normalized # on the", "sum(x * y, axis=axes[0], keepdims=True) return result if axes[0] ==", "+ str(type(x)) + '`. ' 'Expected a symbolic tensor instance.')", "str(data_format)) dims = len(x.shape) if dims > 0 and x.shape[0]", "variable length inputs. Please ' 'pass inputs that have a", "1) x = _padding(x, padding[1], 2) return x def spatial_3d_padding(x,", "elif isinstance(n, list): n = tuple(n) shape = int_shape(x) num_dynamic_axis", "dilation_rate * (kernel.shape[0] - 1) x = temporal_padding(x, (left_pad, 0))", "= bias.shape else: shape = bias.shape return x + reshape(bias,", "dtype, seed) def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): if", "n_s in zip(states, new_states): return_states.append( C.ops.element_select( mask_slice, n_s, s)) new_states", "axis. name: name of this node. \"\"\" def __init__(self, input,", "x + reshape(bias, shape) def eval(x): if isinstance(x, C.cntk_py.Function): return", "y) def sin(x): return C.sin(x) def cos(x): return C.cos(x) def", "axis=axis) base_shape = x.shape if pattern[1] > 0: postfix_shape =", "x def _preprocess_conv3d_kernel(kernel, dim_ordering): kernel = C.transpose(kernel, (4, 3, 0,", "dims)) if dims == 4: if data_format == 'channels_first': if", "return _get_dynamic_axis_num(x) def _reduce_on_axis(x, axis, reduce_fun_name): if isinstance(axis, list): for", "(bias.shape[0], 1, 1, 1) else: shape = (bias.shape[3],) + bias.shape[:3]", "'_uses_learning_phase', False): uses_learning_phase = True if mask is not None:", "True if m is not None: new_states = [C.element_select(m, n,", "= _preprocess_conv2d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = (1,) +", "# the gradient during training. global grad_parameter_dict if isinstance(variables, list)", "recurrent layer work. 
# need to be fixed in GA.", "is not None: mask_shape = int_shape(mask) if len(mask_shape) == dims", "training is True else alt else: result = C.element_select(training, x,", "release if _get_cntk_version() >= 2.2: return C.ops.gather(reference, indices) else: num_classes", "if go_backwards and need_convert is False: raise NotImplementedError('CNTK Backend: `go_backwards`", "if mask is not None: mask_slice = C.ops.slice(mask, time_axis, i,", "x._cntk_placeholder = True return x def is_placeholder(x): \"\"\"Returns whether `x`", "ValueError('Unknown data_format ' + str(data_format)) padding = _preprocess_border_mode(padding) strides =", "'which is not supported. Please do permute ' 'on static", "cntk 2.1's unpack_batch implementation if hasattr(C, 'unpack_batch') and _get_cntk_version() >=", "running with float, # try to cast to float to", "padding[1], 1) else: assert len(base_shape) == 4 if hasattr(C, 'pad'):", "C.Axis) is False: reduce_axes.append(a) return _reshape_dummy_dim(x, reduce_axes) else: if isinstance(axis,", "of type `' + str(type(x)) + '`. ' 'Expected a", "strides=(1, 1), padding='valid', data_format=None): if data_format is None: data_format =", "epsilon clipping output = C.clip(output, epsilon(), 1.0 - epsilon()) return", "batch_size: size of batch axis. name: name of this node.", "or _LEARNING_PHASE == 1)): _, output_values = self.metrics_func.forward( input_dict, self.metrics_func.outputs,", "cause crash. # We have made a fix but not", "'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2])]) else:", "1: mask = expand_dims(mask) nones = _get_dynamic_axis_num(inputs) states = tuple(initial_states)", "return _reshape_batch(x, shape) else: # no collapse, then first need", "initial value of the tensor. dtype: Tensor type. name: Optional", "n_s, s)) new_states = return_states outputs.append(output) states = new_states[:len(states)] i", "if dtype is None: dtype = floatx() if name is", "3) return x def one_hot(indices, num_classes): return C.one_hot(indices, num_classes) def", "is None: dtype = floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.ones(shape,", "if len(outputs) > 1 else ( outputs[0], ) self.trainer =", "Shape: (batch, filters, output_length, input_length * kernel_size) output = x_aggregate", "len(int_shape(mask)) == 2: mask = expand_dims(mask) mask = C.to_sequence_like(mask, rnn_inputs)", "auto_padding=[False, padding, padding], groups=x.shape[0]) else: if dilation_rate[0] != dilation_rate[1]: raise", "repeat_elements(x, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return output", "output_length, input_length * kernel_size output = x_aggregate * weight #", "strides=strides, auto_padding=[False, padding, padding], groups=x.shape[0]) else: if dilation_rate[0] != dilation_rate[1]:", "is conditioned by the Numpy RNG seed = np.random.randint(10e3) if", "axis, keepdims=False): if keepdims is False and isinstance(axis, list): #", "Boolean. \"\"\" return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder def is_keras_tensor(x): if", "updates=updates, **kwargs) def temporal_padding(x, padding=(1, 1)): assert len(padding) == 2", "not_equal(x, y): return C.not_equal(x, y) def greater(x, y): return C.greater(x,", "enumerate(_axis): if a is not None and a < 0:", "i + 1) mask_slice = squeeze(mask_slice, time_axis) if len(outputs) ==", "Arguments x: A candidate placeholder. # Returns Boolean. 
\"\"\" return", "C.reshape(result, ()) else: # scale preds so that the class", "2 assert len(padding[0]) == 2 assert len(padding[1]) == 2 if", "str(pool_mode)) return _postprocess_conv2d_output(x, data_format) def pool3d(x, pool_size, strides=(1, 1, 1),", "list(padding[1]), [0, 0]]) else: x = _padding(x, padding[0], 0) x", "name is None: name = '' cntk_shape = cntk_shape[dynamic_axis_num:] x", "max_value) if alpha != 0.: x -= alpha * negative_part", "if len(int_shape(mask)) == 2: mask = expand_dims(mask) mask = C.to_sequence_like(mask,", "tuple([f.output for f in criterion]) elif len(u_ops) > 0: unrelated_updates.extend(u_ops)", "C.reshape(beta, target_shape) normalized = batch_normalization( x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma,", "1, 1), auto_padding=[False]) return _postprocess_conv2d_output(x, data_format) def depthwise_conv2d(x, depthwise_kernel, strides=(1,", "and shape(var)[0] == 1: var = _reshape_dummy_dim(var, [0]) if gamma", "repeat_elements(x, depth_factor, axis=1) output = repeat_elements(output, height_factor, axis=2) output =", "any(x, axis=None, keepdims=False): reduce_result = sum(x, axis, keepdims=keepdims) any_matrix =", "is None: name = '' cntk_shape = cntk_shape[dynamic_axis_num:] x =", "len(n) != len(shape): raise NotImplementedError i = num_dynamic_axis for i,", "= value def clear_session(): \"\"\"Reset learning phase flag for cntk", "instead of `None`.') return np.prod(int_shape(x)) def cast(x, dtype): # cntk", "== 'channels_first': xs.append(reshape(inputs[:, :, slice_row, slice_col], (-1, 1, feature_dim))) else:", "native implementation later. # Arguments inputs: a cntk variable (parameter/constant)", "with ' 'shape `%s` contains non-specified dimension, ' 'which is", "def batch_dot(x, y, axes=None): x_shape = int_shape(x) y_shape = int_shape(y)", "(1, 1): raise ValueError('Invalid strides for dilated convolution') x =", "which is not ' 'related to any parameters in the", "__call__(self, inputs): global _LEARNING_PHASE_PLACEHOLDER global _LEARNING_PHASE assert isinstance(inputs, (list, tuple))", "= C.reshape(x, new_shape) if index < nones: result._keras_shape = shape", "# Returns A variable instance (with Keras metadata included). 
\"\"\"", "rnn_constants.append(C.sequence.broadcast_as(constant, rnn_inputs)) else: rnn_constants.append(constant) else: rnn_constants = constants if mask", "len(_axis) == 0: return x nones = _get_dynamic_axis_num(x) for _", "kernel, output_shape, strides=(1, 1, 1), padding='valid', data_format=None): if data_format is", "C.convolution(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False]) return _postprocess_conv2d_output(x, data_format) def", "== 'avg': x = C.pooling( x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding])", "assert(len(result) == 2) outputs = result[1] for o in self.trainer_output:", "# there is a bug in cntk 2.1's unpack_batch implementation", "did not take place.') return x return _reshape_batch(x, shape) else:", "_ == C.FreeDimension else _ for _ in shape]) if", "= np.prod(x.shape) x = C.reshape(x, (-1,)) x._keras_shape = (None, dim)", "= mean(x, axis, keepdims=True) devs_squared = C.square(x - m) return", "return x def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)),", "None: input_dict = {} for argument in self.metrics_func.arguments: if argument", "return hasattr(x, '_keras_history') def is_tensor(x): return isinstance(x, (C.variables.Constant, C.variables.Variable, C.variables.Parameter,", "C.round(x) def sigmoid(x): return C.sigmoid(x) def sign(x): return x /", "not supported with ' 'variable-length sequences. Please specify a '", "ndim) if _axis[i] is not None: _axis[i] = cntk_axis[_axis[i]] else:", "C.ops.slice(mask, time_axis, i, i + 1) mask_slice = squeeze(mask_slice, time_axis)", "s, p in zip(states, place_holders): past_values.append(C.sequence.past_value(p, s)) new_output, new_states =", "value * (1. - momentum)) def update_add(x, increment): result =", "warnings.warn( 'Warning: CNTK backend does not support ' 'collapse of", "op, so here we use # element_select approach as workaround.", "raise ValueError( 'CNTK backend: assign ops argument %s ' 'is", "C.pad(x, pattern=[list(padding[0]), list(padding[1]), [0, 0]]) else: x = _padding(x, padding[0],", "np_value = value * np.ones(shape) const = C.constant(np_value, dtype=dtype, name=_prepare_name(name,", "'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) padding = _preprocess_border_mode(padding)", "- 1] y = C.transpose(y, perm=permutation) return C.times(x, y, len(y_shape)", "None: self.metrics_outputs = [f.output for f in outputs] self.metrics_func =", "return the same x to take cntk broadcast feature #", "kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): if data_format is None: data_format", "1 return _UID_PREFIXES[prefix] def learning_phase(): # If _LEARNING_PHASE is not", "permute_dimensions(kernel, (2, 0, 1)) # Shape: (batch, filters, output_length, input_length", "return _remove_dims(output, axis, keepdims) def sum(x, axis=None, keepdims=False): axis =", "== 0 and _get_dynamic_axis_num(l_s) == 1: if hasattr(C, 'unpack_batch'): f_stats.append(C.unpack_batch(l_s))", "[axis] axis = _normalize_axis(axis, x) output = C.ops.argmin(x, axis=axis[0]) return", "= uses_learning_phase return last_output, final_output, f_stats def has_seq_axis(x): return hasattr(x,", "normalized # on the format `(rows, cols, input_depth, depth)`, #", "1 def count_params(x): for _ in x.shape: if _ ==", "is False and isinstance(axis, list): # sequence axis is removed", "variant def _moments(x, axes=None, shift=None, keep_dims=False): _axes = tuple(axes) if", "and x._cntk_placeholder def is_keras_tensor(x): if not is_tensor(x): raise ValueError('Unexpectedly found", 
"== 1: shape = (1, bias.shape[0]) else: shape = bias.shape", "= () np_value = value * np.ones(shape) const = C.constant(np_value,", "None, C.cntk_py.Value(result) def backward(self, state, root_gradients): grad_array_view = root_gradients.data() num_element", "'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), [0, 0]])", "= alt() if training is True: x._uses_learning_phase = uses_learning_phase return", "in axis: if isinstance(a, C.Axis) \\ and a != C.Axis.default_batch_axis()", "isinstance(update, tuple): if len(update) != 2: raise NotImplementedError else: u", "axis=-1) # avoid numerical instability with epsilon clipping output =", "function(inputs, outputs, updates=[], **kwargs): return Function(inputs, outputs, updates=updates, **kwargs) def", "this mode, that's why # we need this check. if", "model return np.float32 def _convert_dtype_string(dtype): if dtype == np.float32: return", "initial = [] for s in initial_states: if _get_dynamic_axis_num(s) ==", "axis = [axis] axis = _normalize_axis(axis, tensors[0]) return C.splice(*tensors, axis=axis[0])", "' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) kernel = _preprocess_conv2d_kernel(kernel,", "is not C.FreeDimension: final_output = _reshape_sequence(final_output, num_time_step) f_stats = []", "= C.ops.element_select(mask_slice, output, prev_output) return_states = [] for s, n_s", "x = _padding(x, padding[1], 3) else: if num_dynamic_axis > 0:", "supports `eval` with ' '`Function`, `Constant` or ' '`Parameter`.' %", "def max(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output =", "x.dynamic_axes] shape = tuple(dynamic_shape) + shape return shape def ndim(x):", "data_format) def conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1,", "x def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)):", "mask=None, constants=None, unroll=False, input_length=None): shape = int_shape(inputs) dims = len(shape)", "warning: CNTK version not detected. ' 'Will using CNTK 2.0", "[learner]) self.trainer_output = tuple([f.output for f in criterion]) elif len(u_ops)", "var, beta, gamma, epsilon=1e-3): # The mean / var /", "tuple(initial_states) outputs = [] time_axis = 1 - nones if", "False x._cntk_placeholder = True return x def is_placeholder(x): \"\"\"Returns whether", "element_select approach as workaround. It may have # perf issue,", "0, 1, 2)) return x def _preprocess_conv3d_kernel(kernel, dim_ordering): kernel =", "or isinstance( x, C.variables.Constant): return x.value else: return eval(x) def", "please try ' 'dynamic rnn with sequence axis.' 
% shape)", "2 assert len(padding[1]) == 2 if data_format is None: data_format", "axis in _axes: shifted_mean = C.reduce_mean(shifted_mean, axis=axis) variance_mean = C.square(C.minus(x,", "zeros_like(output) else: prev_output = outputs[-1] output = C.ops.element_select(mask_slice, output, prev_output)", "result = x if training == 1 or training is", "l2_normalize(x, axis=None): axis = [axis] axis = _normalize_axis(axis, x) norm", "data_format) def repeat_elements(x, rep, axis): axis = _normalize_axis(axis, x) axis", "tensor used to run Keras models in # either train", "creating placeholder with ' '%d dimension is not supported, at", "1) final_output = C.splice(final_output, output_slice, axis=time_axis) last_output = outputs[i] i", "pool_size, strides=(1, 1), padding='valid', data_format=None, pool_mode='max'): if data_format is None:", "new_shape = shape[nones:] new_shape = tuple([C.InferredDimension if _ == C.FreeDimension", "tmp = C.ops.slice(x, axis, i, i + 1) for _", "metadata included). \"\"\" if dtype is None: dtype = floatx()", "+ self.target_shape) return None, C.cntk_py.Value(result) def backward(self, state, root_gradients): grad_array_view", "then and' ' else expressions. ndim(condition)=' + str(ndim_cond) + ',", "' than or equal to rank of then and' '", "return variable(value=p.value + low + scale) def random_normal_variable( shape, mean,", "# Arguments inputs: a cntk tensor which has batch axis", "y) def less_equal(x, y): return C.less_equal(x, y) def maximum(x, y):", "shape, mean, scale, dtype=None, name=None, seed=None): if dtype is None:", "(samples, conv_dim1, conv_dim2, conv_dim3, # input_depth) x = C.transpose(x, (3,", "data_format ' + str(data_format)) dims = len(x.shape) if dims >", "_axis is None: _axis = C.Axis.all_axes() return _axis def _reshape_dummy_dim(x,", "shift = C.reduce_mean(shift, axis=axis) shift = C.stop_gradient(shift) shifted_mean = C.minus(x,", "= inputs.shape[0] initial = [] for s in initial_states: if", "mask) last_output = C.sequence.last(final_output) last_states = [C.sequence.last(s) for s in", "if num_time_step is None and not has_seq_axis(inputs): num_time_step = inputs.shape[0]", "not include batch axis output_shape = output_shape[1:] # in keras2,", "inputs: a cntk tensor which has batch axis batch_size: size", "C.cntk_py.Value(arguments.data()) def backward(self, state, root_gradients): return C.cntk_py.Value(root_gradients.data()) class LambdaFunc(C.ops.functions.UserFunction): def", "stride + kernel_size[0]) xs.append(reshape(inputs[:, slice_length, :], (-1, 1, feature_dim))) x_aggregate", "tile(condition, shape_expr[ndim_cond + i]) return C.element_select(condition, then_expression, else_expression) def elu(x,", "for v in variables: g = C.constant(0, shape=v.shape, name='keras_grad_placeholder') grads.append(g)", "/ (1 + C.abs(x)) def categorical_crossentropy(target, output, from_logits=False): if from_logits:", "self.inputs[0].dynamic_axes)] def forward(self, argument, device=None, outputs_to_retain=None): if self.when(argument): self.execute(argument) return", "= _padding(x, padding[1], 1) else: assert len(base_shape) == 4 if", "backend warning: GPU is not detected. 
' 'CNTK\\'s CPU version", "= uses_learning_phase return x else: # if _LEARNING_PHASE is static", "0)]) else: x = _padding(x, padding, 0) else: assert len(base_shape)", "i in range(dims)] else: current_layout = tuple([i for i in", "int): axes = (axes, axes) if axes is None: #", "def __init__(self, inputs, outputs, updates=[], **kwargs): self.placeholders = inputs self.trainer", "kernel_shape = int_shape(kernel) output_length, feature_dim, filters = kernel_shape xs =", "data_format ' + str(data_format)) padding = _preprocess_border_mode(padding) x = _preprocess_conv3d_input(x,", "padding=(1, 1)): assert len(padding) == 2 num_dynamic_axis = _get_dynamic_axis_num(x) base_shape", "is None: raise ValueError('CNTK Backend: randomness op with ' 'dynamic", "and len(shape) > 0: value = value.astype(dtype) # TODO: remove", "_preprocess_conv2d_input(x, data_format) depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format) depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1,", "first one. if len(version) > 2 and version[1] == '.':", "2.0 GA as default.') return float(2.0) class ReshapeBatch(C.ops.functions.UserFunction): def __init__(self,", "with sequence axis.' % shape) if constants is None: constants", "to keep the mapping from grad placeholder to parameter grad_parameter_dict", "returns it. # Arguments value: Numpy array, initial value of", "a): return C.pow(x, a) def clip(x, min_value, max_value): if max_value", "dilation_rate[0] != dilation_rate[1]: raise ValueError('CNTK Backend: non-square dilation_rate is '", "1) x = _padding(x, padding[2], 2) else: assert len(base_shape) ==", "= floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.zeros(shape, ctype), dtype=dtype, name=name)", "not supported, ' 'expected 0 or 1.' % value) _LEARNING_PHASE", "/ 2 p = C.parameter( shape, init=C.initializer.uniform( scale, seed=seed), dtype=dtype,", "1) x = _padding(x, padding[1], 2) else: assert len(base_shape) ==", "data_format): if data_format == 'channels_first': output = repeat_elements(x, height_factor, axis=2)", "result def squeeze(x, axis): if isinstance(axis, tuple): axis = list(axis)", "if data_format is None: data_format = image_data_format() if data_format not", "1: shape = (1, 1, bias.shape[0]) else: shape = bias.shape", "value.dtype != np.float32 and value.dtype != np.float64): value = value.astype(np.float32)", "= repeat_elements(output, width_factor, axis=3) return output elif data_format == 'channels_last':", "data_format) def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): if data_format ==", "for i, a in enumerate(_axis): if a is not None", "x) output = C.ops.argmin(x, axis=axis[0]) return _reshape_dummy_dim(output, axis) def square(x):", "to the variable after an optimizer update. # Returns A", "output.shape) return categorical_crossentropy(target, output, from_logits) class Function(object): def __init__(self, inputs,", "Keras metadata included). 
\"\"\" if dtype is None: dtype =", "self.unrelated_updates.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise", "_axis.append(_ if _ >= 0 else _ + len(shape)) if", "padding='valid', data_format=None): if data_format is None: data_format = image_data_format() if", "initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None): shape = int_shape(inputs) dims", "x.dynamic_axes[dynamic_axis_index] i += 1 dynamic_axis_index += 1 while i <", "not keep_dims: mean = squeeze(mean, _axes) variance = squeeze(variance, _axes)", "True, execute=lambda arg: print(arg), name=''): self.when = when self.execute =", "C.not_equal(x, y) def greater(x, y): return C.greater(x, y) def greater_equal(x,", "== 'channels_last': output = repeat_elements(x, depth_factor, axis=1) output = repeat_elements(output,", "x_aggregate = concatenate(xs, axis=1) # transpose kernel to output_filters first,", "def in_top_k(predictions, targets, k): _targets = C.one_hot(targets, predictions.shape[-1]) result =", "_reduce_on_axis(x, axis, 'reduce_min') return _remove_dims(output, axis, keepdims) def sum(x, axis=None,", "g in grad_parameter_dict: p_list.append(grad_parameter_dict[g]) u_list.append(g) else: raise ValueError( 'CNTK backend:", "(strides[0],) x = C.convolution( kernel, x, strides, auto_padding=[ False, padding,", "C.transpose(x, (2, 0, 1)) return x def _preprocess_conv2d_kernel(kernel, data_format): #", "outputs[2:]] self.metrics_func = C.combine(self.metrics_outputs) else: self.metrics_func = None @staticmethod def", "1: beta = _reshape_dummy_dim(beta, [0]) return (x - mean) /", "= _preprocess_conv2d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) if dilation_rate == (1,", "[dynamic_dimension if s is None else s for s in", "2] permutation += list(range(len(y_shape) - 2)) permutation += [len(y_shape) -", "return res else: return C.element_select(C.greater(x, 0), res, alpha * res)", "C.cntk_py.Value(root_gradients.data()) class ConvertToStatic(C.ops.functions.UserFunction): \"\"\"Converts input first axis to CNTK static", "unroll=False, input_length=None): shape = int_shape(inputs) dims = len(shape) global uses_learning_phase", "less(x, y): return C.less(x, y) def less_equal(x, y): return C.less_equal(x,", "[] for a in axis: if isinstance(a, C.Axis) is False:", "1) last_output = outputs[0] while i < len(outputs): # add", "= value.shape if hasattr(value, 'shape') else () if hasattr(value, 'dtype')", "broadcast_beta = C.reshape(beta, target_shape) normalized = batch_normalization( x, broadcast_mean, broadcast_var,", "dim in x.shape dim = np.prod(x.shape) x = C.reshape(x, (-1,))", "_get_dynamic_axis_num(x) == 0: return C.reduce_sum(any_matrix) else: return any_matrix def all(x,", "axis with inferred dimension. ' 'The reshape did not take", "axis=axis, keepdims=keepdims) def std(x, axis=None, keepdims=False): return C.sqrt(var(x, axis=axis, keepdims=keepdims))", "' '%d dimension is not supported, at least ' '%d", "else: raise ValueError('CNTK Backend: Unsupported dtype: %s. 
' 'CNTK only", "back final_output = expand_dims(outputs[0], 1) last_output = outputs[0] while i", "keepdims) def prod(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output", "1, 1, bias.shape[0]) else: shape = bias.shape elif dims ==", "criterion, [learner]) self.trainer_output = tuple([f.output for f in criterion]) elif", "= feed_dict[argument] else: raise ValueError('CNTK backend: metrics argument %s '", "x = C.transpose(x, (2, 0, 1)) return x def _preprocess_conv2d_kernel(kernel,", "axis=axis) variance_mean = C.square(C.minus(x, shift)) for axis in _axes: variance_mean", "3: raise ValueError('CNTK Backend: the input of rnn has only", "0 else _ for _ in axis] if shape.count(C.InferredDimension) >", "in criterion]) elif len(u_ops) > 0: unrelated_updates.extend(u_ops) if len(unrelated_updates) >", "1.0 or _LEARNING_PHASE == 1)): _, output_values = self.metrics_func.forward( input_dict,", "else _ for _ in new_shape] return C.reshape(x, new_shape) def", "= C.pad(x, pattern=[(0, 0), padding, (0, 0)]) else: x =", "axes): if isinstance(axes, int): axes = [axes] cntk_axes = _normalize_axis(axes,", "x nones = _get_dynamic_axis_num(x) for _ in sorted(_axis, reverse=True): del", "is removed by default, so don't need reshape on it", "be applied to the variable after an optimizer update. #", "= int_shape(inputs) dims = len(shape) uses_learning_phase = False if dims", "# cntk doesn't support gradient as symbolic op, to hook", "axis) return x def _reshape_sequence(x, time_step): tmp_shape = list(int_shape(x)) tmp_shape[1]", "normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): if gamma is None: if", "def dot(x, y): if len(x.shape) > 2 or len(y.shape) >", "(2, 0, 1)) # Shape: (batch, filters, output_length, input_length *", "if training == 1 or training is True else alt", "None: gamma = ones_like(var) elif ndim(gamma) == ndim(x) and shape(gamma)[0]", "0)) return x def _preprocess_conv3d_input(x, data_format): if data_format == 'channels_last':", "+ num_dynamic] is None: non_dyn_shape.append(x.shape[i]) else: non_dyn_shape.append(shape[i + num_dynamic]) return", "and pattern[:num_dynamic_axis] != current_layout[:num_dynamic_axis]: raise ValueError('CNTK backend: the permute pattern", "support input with variable # length. Will support it in", "* stride_col + kernel_size[1]) if data_format == 'channels_first': xs.append(reshape(inputs[:, :,", "= np.prod(np.asarray(self.target_shape)) num_batch = int(num_element / num_static_element) result = arguments.data().as_shape((num_batch,)", "not supported. ' 'CNTK only supports `eval` with ' '`Function`,", "y): return C.equal(x, y) def not_equal(x, y): return C.not_equal(x, y)", "and _get_dynamic_axis_num(x) == 0: return C.reduce_sum(any_matrix) else: return any_matrix def", "C.InferredDimension or n is C.FreeDimension: return x index = 1", "axis, keepdims) def min(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x)", "axis = _normalize_axis(axis, tensors[0]) return C.splice(*tensors, axis=axis[0]) def flatten(x): return", "constants if mask is not None and not has_seq_axis(mask): if", "dtype=None, seed=None): for _ in shape: if _ is None:", "'related to any parameters in the model. 
' 'Please double", "data_format=None, dilation_rate=1): if data_format is None: data_format = image_data_format() if", "list(base_shape) prefix_shape[axis] = pattern[0] prefix_shape = tuple(prefix_shape) x = C.splice(C.constant(value=0,", "floatx() if shape is None: shape = () np_value =", "None: # behaves like tf.batch_matmul as default axes = [len(x_shape)", "padding[1], 3) else: if num_dynamic_axis > 0: assert len(base_shape) ==", "reshape(output, (-1, filters, output_row, output_col)) if data_format == 'channels_last': #", "strides, data_format=None): if data_format is None: data_format = image_data_format() if", "mean=0.0, stddev=1.0, dtype=None, seed=None): if seed is None: seed =", "# length. Will support it in next release. if not", "size of batch axis. name: name of this node. \"\"\"", "def infer_outputs(self): return [ C.output_variable( self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes)] def forward(self,", "the Numpy RNG seed = np.random.randint(10e7) np.random.seed(seed) if dtype is", "C.swapaxes(x, i, i + 1) i += 1 i =", "hasattr(value, 'shape') else () if hasattr(value, 'dtype') and value.dtype !=", "floatx, epsilon, image_dim_ordering, image_data_format from collections import defaultdict from contextlib", "new_shape.insert(index, 1) new_shape = tuple(new_shape) x = C.reshape(x, new_shape) temp", "result = C.classification_error(predictions, _targets, topN=k) return 1 - C.reshape(result, shape=())", "filters, output_length) output = sum(output, axis=3) # Shape: (batch, output_length,", "assert len(padding) == 2 assert len(padding[0]) == 2 assert len(padding[1])", "doesn't support gradient as symbolic op, to hook up with", "alpha * negative_part return x def dropout(x, level, noise_shape=None, seed=None):", "= C.transpose(kernel, (3, 2, 0, 1)) return kernel def _preprocess_border_mode(padding):", "'`train_function`.' % argument.name) result = self.trainer.train_minibatch( input_dict, self.trainer_output) assert(len(result) ==", "i != p and p != C.InferredDimension and p !=", "== 2 and len(y_shape) == 2: if axes[0] == axes[1]:", "in enumerate(n): if i >= num_dynamic_axis and shape[i] is not", "# map to keep the mapping from grad placeholder to", "sequence axis.' % shape) if constants is None: constants =", "auto_padding=[False, padding, padding]) x = C.convolution(pointwise_kernel, x, strides=(1, 1, 1),", "expect (batch, ) return C.reshape(result, ()) else: # scale preds", "later with cntk cond op. if callable(x) and isinstance(x, C.cntk_py.Function)", "batch_dot(x, y, axes=None): x_shape = int_shape(x) y_shape = int_shape(y) if", "hasattr(x, 'dynamic_axes'): return len(x.dynamic_axes) else: return 0 def _contain_seqence_axis(x): if", "def transpose(x): return C.swapaxes(x, 0, 1) def gather(reference, indices): #", "== 1: var = _reshape_dummy_dim(var, [0]) if gamma is None:", "keepdims=keepdims)) def expand_dims(x, axis=-1): shape = list(int_shape(x)) nones = _get_dynamic_axis_num(x)", "x() if callable(alt) and isinstance(alt, C.cntk_py.Function) is False: alt =", "10e6) if dtype is None: dtype = np.float32 else: dtype", "'float64.' % dtype) def variable(value, dtype=None, name=None, constraint=None): \"\"\"Instantiates a", "mean = squeeze(mean, _axes) variance = squeeze(variance, _axes) return mean,", "range(dims)]) if num_dynamic_axis > 0 and pattern[:num_dynamic_axis] != current_layout[:num_dynamic_axis]: raise", "= x.shape if b_any([dim < 0 for dim in base_shape]):", "' 'shape is not supported. 
% argument.name) self.unrelated_updates.eval(input_dict, as_numpy=False) return updated def function(inputs,", "if hasattr(input, 'shape') and hasattr(placeholder, 'shape'): num_dynamic = get_num_dynamic_axis(placeholder) input_shape", "* x) + 0.5 x = C.clip(x, 0.0, 1.0) return", "'channels_first': if num_dynamic_axis > 0: assert len(base_shape) == 4 if", "= ndim_expr - ndim_cond for i in range(ndim_diff): condition =", "and not has_seq_axis(mask): if go_backwards: mask = reverse(mask, 1) if", "def zeros_like(x, dtype=None, name=None): return x * 0 def ones_like(x,", "permute ' 'on static axis.' % pattern) axis = list(pattern)", "self.metrics_func.eval(input_dict, as_numpy=False) if isinstance(output_values, dict): for o in self.metrics_outputs: value", "Function(inputs, outputs, updates=updates, **kwargs) def temporal_padding(x, padding=(1, 1)): assert len(padding)", "= x_aggregate * weight # Shape: (batch, filters, output_length) output", "apply mean and stddev return random_normal_variable(shape=shape, mean=mean, scale=1.0, seed=seed) def", "eval elif len(outputs) > 2: self.metrics_outputs = [f.output for f", "new_states = return_states outputs.append(output) states = new_states[:len(states)] i += 1", "repeat_elements(output, height_factor, axis=3) output = repeat_elements(output, width_factor, axis=4) return output", "return_states = [] for s, n_s in zip(states, new_states): return_states.append(", "axis=C.Axis.all_axes()) def argmax(x, axis=-1): axis = [axis] axis = _normalize_axis(axis,", "C.swapaxes(x, 0, 1) return x def conv2d(x, kernel, strides=(1, 1),", "dynamic axis, this is not expected, please ' 'double check", "shape, init=C.initializer.uniform( scale, seed=seed), dtype=dtype, name=name) return variable(value=p.value + low", "n = tuple([1 for _ in range(len(shape) - len(n))]) +", "2] if b_any([isinstance(a, (list, tuple)) for a in axes]): raise", "2: permutation = [len(y_shape) - 2] permutation += list(range(len(y_shape) -", "slice(i * stride, i * stride + kernel_size[0]) xs.append(reshape(inputs[:, slice_length,", "c in constant: if _get_dynamic_axis_num(c) == 1: new_c.append(C.sequence.broadcast_as(c, rnn_inputs)) else:", "we will return a constant as place holder, the cntk", "shape[1] = output_shape[0] shape[2] = output_shape[1] shape[3] = output_shape[2] output_shape", "(1,) + strides x = C.convolution( kernel, x, strides, auto_padding=[", "(1, 0, 2, 3)), (-1, 1) + depthwise_kernel.shape[2:]) pointwise_kernel =", "= {} for argument in self.metrics_func.arguments: if argument in feed_dict:", "= 1 # add the time_step axis back final_output =", "0: current = C.ops.slice(inputs, time_axis, i, i + 1) #", "x._keras_shape = (None, dim) return x def softmax(x, axis=-1): return", "num_dynamic] is None: non_dyn_shape.append(x.shape[i]) else: non_dyn_shape.append(shape[i + num_dynamic]) return shape[:num_dynamic]", "num_dynamic_axis > 0: assert len(base_shape) == 3 if hasattr(C, 'pad'):", "to run the model return np.float32 def _convert_dtype_string(dtype): if dtype", "dtype = floatx() if name is None: name = ''", "None: seed = np.random.randint(1, 10e6) if dtype is None: dtype", "def set_value(x, value): if (isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)): if", "return _postprocess_conv3d_output(x, data_format) def relu(x, alpha=0., max_value=None): if alpha !=", "raise NotImplementedError else: u = C.assign(update[0], update[1]) else: u =", "def exp(x): return C.exp(x) def log(x): return C.log(x) def round(x):", "width_factor, axis=3) return output 
elif data_format == 'channels_last': output =", "2, need manual eval elif len(outputs) > 2: self.metrics_outputs =", "' 'is not static. If you want to run '", "0)]) else: x = _padding(x, padding, 1) return x def", "final_output, f_stats def has_seq_axis(x): return hasattr(x, 'dynamic_axes') and len(x.dynamic_axes) >", "in native cntk op cntk_axis = [] dynamic_axis_index = 0", "input_dict, self.metrics_func.outputs, (self.metrics_func.outputs[0],), as_numpy=False) else: output_values = self.metrics_func.eval(input_dict, as_numpy=False) if", "x / (1 + C.abs(x)) def categorical_crossentropy(target, output, from_logits=False): if", "max_value is not None and max_value < min_value: max_value =", "data_format=None, pool_mode='max'): if data_format is None: data_format = image_data_format() if", "applied to the variable after an optimizer update. # Returns", "1.' % value) _LEARNING_PHASE = value def clear_session(): \"\"\"Reset learning", "cntk dynamic axis, this is not expected, please ' 'double", "unrelated_updates]) if self.trainer is None: self.metrics_outputs = [f.output for f", "(bias.shape[1],) + bias.shape[:1] elif data_format == 'channels_last': if bias_dims ==", "updated.append(v) if self.unrelated_updates is not None: input_dict = {} for", "else: i = 0 while i < shape[1]: current =", "isinstance(training, int) or isinstance(training, bool): result = x if training", "= C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension cntk_shape =", "base_shape]): raise ValueError('CNTK Backend: padding input tensor with ' 'shape", "cntk. # They only evaluated in training phase. To make", "in range(output_length): slice_length = slice(i * stride, i * stride", "< 3: raise ValueError('Input should be at least 3D.') #", "seed = np.random.randint(10e3) if dtype is None: dtype = np.float32", "training phase. To make it work, call # \"forward\" method", "1 - _get_dynamic_axis_num(x) if index < 0 or index >", "axis = axis[0] slices = [] shape = x.shape i", "raise ValueError('Unknown data_format ' + str(data_format)) padding = _preprocess_border_mode(padding) strides", "let cntk know we want to evaluate them.from # But", "all kernels are normalized # on the format `(rows, cols,", "the time_step axis back output_slice = expand_dims(outputs[i], 1) final_output =", "work, call # \"forward\" method to let cntk know we", "version is not fully optimized,' 'please run with GPU to", "and dynamic_axis_index < nones: cntk_axis.append(x.dynamic_axes[dynamic_axis_index]) dynamic_axis_index += 1 else: cntk_axis.append(i", "input first axis to CNTK static axis. We may introduce", "shape = bias.shape elif dims == 3: if data_format ==", "s is None else s for s in shape] cntk_shape", "squeeze(mask_slice, 1) if len(outputs) == 0: prev_output = zeros_like(output) else:", "_axes) variance = squeeze(variance, _axes) return mean, variance def batch_normalization(x,", "C.cntk_py.Value(root_gradients.data()) class LambdaFunc(C.ops.functions.UserFunction): def __init__(self, arg, when=lambda arg: True, execute=lambda", "%s is not found in inputs. 
' 'Please double check", "isinstance(axes, int): axes = (axes, axes) if axes is None:", "padding[0], 0) x = _padding(x, padding[1], 1) else: assert len(base_shape)", "len(int_shape(x)) num_dynamic_axis = _get_dynamic_axis_num(x) if isinstance(pattern, list): current_layout = [i", "x._cntk_placeholder def is_keras_tensor(x): if not is_tensor(x): raise ValueError('Unexpectedly found an", "[] normalized_axis.append(_normalize_axis(axes[0], x)[0]) normalized_axis.append(_normalize_axis(axes[1], y)[0]) # transpose i = normalized_axis[0]", "ValueError('Input should be at least 3D.') # if the second", "a in axes]): raise ValueError('Multiple target dimensions are not supported.", "output_row, output_col = output_shape kernel_shape = int_shape(kernel) _, feature_dim, filters", "'channels_last': if bias_dims == 1: shape = (1, 1, 1,", "b_any([dim < 0 for dim in base_shape]): raise ValueError('CNTK Backend:", "= _normalize_axis(axis, x) axis = axis[0] slices = [] shape", "if (self.unrelated_updates is None and (_LEARNING_PHASE_PLACEHOLDER.value == 1.0 or _LEARNING_PHASE", "axis=1) # transpose kernel to put filters first weight =", "Please do permute ' 'on static axis.' % pattern) axis", "isinstance(output_values, dict): for o in self.metrics_outputs: value = output_values[o] v", "permute_dimensions(output, (0, 2, 3, 1)) return output def reverse(x, axes):", "axis=axis) def repeat(x, n): # this is a workaround for", "Please double ' 'check the model and inputs.' % argument.name)", "broadcast feature # to make the recurrent layer work. #", "0: postfix_shape = list(base_shape) postfix_shape[axis] = pattern[1] postfix_shape = tuple(postfix_shape)", "strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is None:", "var(x, axis=None, keepdims=False): m = mean(x, axis, keepdims=True) devs_squared =", "enable padding.' % base_shape) if pattern[0] > 0: prefix_shape =", "> 2 and version[1] == '.': version = version[:2] +", "a in enumerate(_axis): if a is not None and a", "None: name = '' cntk_shape = cntk_shape[dynamic_axis_num:] x = C.input(", "name is None: name = '' if isinstance( value, C.variables.Constant)", "if i >= num_dynamic_axis and shape[i] is not None: tmp", "ValueError('Invalid pooling mode: ' + str(pool_mode)) return _postprocess_conv3d_output(x, data_format) def", "in sorted(_axis, reverse=True): del shape[index] shape = [C.InferredDimension if _", "zip(input_shape, placeholder_shape): if i != p and p != C.InferredDimension", "{'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) stride_row, stride_col", "with symbolic op, so eval it first as # workaround", "'channels_last': if bias_dims == 1: shape = (1, 1, bias.shape[0])", "from __future__ import absolute_import from __future__ import division from __future__", "dimension as channel dimension, # instead of the 2nd one.", "if min_value is None: min_value = -np.inf return C.clip(x, min_value,", "raise ValueError( 'CNTK backend: when constructing trainer, ' 'found gradient", "i >= 0: current = C.ops.slice(inputs, time_axis, i, i +", "training = learning_phase() uses_learning_phase = True else: uses_learning_phase = False", "tensor. dtype: Tensor type. 
name: Optional name string for the", "> 0: postfix_shape = list(base_shape) postfix_shape[axis] = pattern[1] postfix_shape =", "conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)):", "new_c.append(C.sequence.broadcast_as(c, rnn_inputs)) else: new_c.append(c) rnn_constants.append(new_c) else: if _get_dynamic_axis_num(constant) == 1:", "_preprocess_border_mode(padding) x = _preprocess_conv3d_input(x, data_format) if pool_mode == 'max': x", "2: raise NotImplementedError else: u = C.assign(update[0], update[1]) else: u", "[None for a in x.dynamic_axes] shape = tuple(dynamic_shape) + shape", "if num_dynamic_axis > 0: assert len(base_shape) == 4 if hasattr(C,", "prefix = '_'.join(NAME_SCOPE_STACK) if name is None or name ==", "**kwargs): self.placeholders = inputs self.trainer = None self.unrelated_updates = None", "in cntk 2.1's unpack_batch implementation if hasattr(C, 'unpack_batch') and _get_cntk_version()", "* np.ones(shape) const = C.constant(np_value, dtype=dtype, name=_prepare_name(name, 'constant')) const._keras_shape =", "True: x._uses_learning_phase = uses_learning_phase return x else: # if _LEARNING_PHASE", "range(ndim): if shape[i] is None and dynamic_axis_index < nones: cntk_axis.append(x.dynamic_axes[dynamic_axis_index])", "'collapse of batch axis with inferred dimension. ' 'The reshape", "root_gradients.data() num_element = root_gradients.shape()[0] * np.prod(np.asarray(self.target_shape)) num_static_element = np.prod(np.asarray(self.from_shape)) num_old_batch", "np.float64 else: # cntk only running with float, # try", "() if hasattr(value, 'dtype') and value.dtype != dtype and len(shape)", "cntk's batch axis is not in shape, # so just", "if len(y_shape) > 2: permutation = [len(y_shape) - 2] permutation", "feed_dict[argument] else: raise ValueError('CNTK backend: metrics argument %s ' 'is", "pattern=[list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]]) else: x = _padding(x, padding[0],", "as workaround. It may have # perf issue, will resolve", "kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)): if", "go_backwards: rnn_inputs = reverse(rnn_inputs, 1) rnn_inputs = C.to_sequence(rnn_inputs) rnn_constants =", "False for a in axis: if isinstance(a, C.Axis): has_seq =", "C.Axis.default_dynamic_axis() else: return False def get_num_dynamic_axis(x): return _get_dynamic_axis_num(x) def _reduce_on_axis(x,", "shape like (1, batch). so using the workaround # here", "shape = value.shape if hasattr(value, 'shape') else () if hasattr(value,", "def batch_set_value(tuples): for t in tuples: x = t[0] value", "_preprocess_conv2d_kernel(pointwise_kernel, data_format) padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1):", "data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) depthwise_kernel =", "if data_format == 'channels_first': if num_dynamic_axis > 0: assert len(base_shape)", "< nones: cntk_axis[i] = x.dynamic_axes[dynamic_axis_index] i += 1 dynamic_axis_index +=", "2.1's unpack_batch implementation if hasattr(C, 'unpack_batch') and _get_cntk_version() >= 2.2:", "2, 0)) return x def _preprocess_conv3d_input(x, data_format): if data_format ==", "input_length) if constants is None: constants = [] num_time_step =", "self.inputs[0].shape[1:], self.inputs[0].dtype, [batch_axis])] def forward(self, arguments, device=None, outputs_to_retain=None): return None,", "no collapse, then first need to padding the shape if", "'%d dimensions are needed.' 
% (len(cntk_shape, dynamic_axis_num))) if name is", "return C.clip(x, min_value, max_value) def binary_crossentropy(target, output, from_logits=False): if from_logits:", "axis=time_axis) last_output = outputs[i] i += 1 last_output._uses_learning_phase = uses_learning_phase", "resolved ' 'to shape `%s`, but input shape is `%s`.", "s)) new_output, new_states = step_function( x, tuple(past_values) + tuple(rnn_constants)) if", "def _remove_dims(x, axis, keepdims=False): if keepdims is False and isinstance(axis,", "shape in different format if data_format == 'channels_last': shape =", "history.' % (str(shape), nones)) # Current cntk does not support", "str(ndim_cond) + ', ndim(then_expression)' '=' + str(ndim_expr)) elif ndim_cond <", "return (x - mean) / (C.sqrt(var) + epsilon) * gamma", "data_format) kernel = _preprocess_conv2d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides =", "x = C.convolution( kernel, x, strides, auto_padding=[ False, padding, padding,", "s in shape] cntk_shape = tuple(cntk_shape) if dynamic_axis_num > len(cntk_shape):", "else: raise ValueError('CNTK Backend: `eval` method on ' '`%s` type", "= 1 for _ in shape: if _ is None:", "But the assign ops won't be executed under this mode,", "which has batch axis batch_size: size of batch axis. name:", "== 'channels_first': output = repeat_elements(x, height_factor, axis=2) output = repeat_elements(output,", "def forward(self, arguments, device=None, outputs_to_retain=None): return None, C.cntk_py.Value(arguments.data()) def backward(self,", "as_numpy=False, name=name) self.from_shape = input.shape self.target_shape = shape def infer_outputs(self):", "== 1: return res else: return C.element_select(C.greater(x, 0), res, alpha", "output_shape, strides=(1, 1, 1), padding='valid', data_format=None): if data_format is None:", "(len(cntk_shape, dynamic_axis_num))) if name is None: name = '' cntk_shape", "we need this check. if (self.unrelated_updates is None and (_LEARNING_PHASE_PLACEHOLDER.value", "pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1]), list(padding[2])]) else: x =", "!= 0.: x -= alpha * negative_part return x def", "xs.append(reshape(inputs[:, slice_row, slice_col, :], (-1, 1, feature_dim))) x_aggregate = concatenate(xs,", "if _ == C.InferredDimension or _ == C.FreeDimension: raise ValueError('CNTK", "_ in x.shape: if _ == C.InferredDimension or _ ==", "x def _get_dynamic_axis_num(x): if hasattr(x, 'dynamic_axes'): return len(x.dynamic_axes) else: return", "strides=dilation_rate[0], auto_padding=[False, padding, padding], groups=x.shape[0]) return _postprocess_conv2d_output(x, data_format) def conv3d(x,", "one. # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)", "0 or index > 1: raise NotImplementedError new_shape = list(x.shape)", "training is True: x._uses_learning_phase = uses_learning_phase return x else: #", "= reverse(mask, 1) if len(int_shape(mask)) == 2: mask = expand_dims(mask)", "dropout(x, level, noise_shape=None, seed=None): if level < 0. 
or level", "for i in range(dims)] else: current_layout = tuple([i for i", "axis - 1) else: target_shape.append(x_shape[axis]) broadcast_mean = C.reshape(mean, target_shape) broadcast_var", "False def get_num_dynamic_axis(x): return _get_dynamic_axis_num(x) def _reduce_on_axis(x, axis, reduce_fun_name): if", "+ len(shape)) if len(_axis) == 0: return x nones =", "axis: if isinstance(a, C.Axis) \\ and a != C.Axis.default_batch_axis() \\", "x = C.swapaxes(x, 0, 1) return x def conv2d(x, kernel,", "u = C.assign(update[0], update[1]) else: u = update if len(u.arguments)", "return float(version) except: warnings.warn( 'CNTK backend warning: CNTK version not", "= [] for _ in axis: if isinstance(_, int): _axis.append(_", "0, 1) def gather(reference, indices): # There is a bug", "b_any(_ == C.InferredDimension for _ in x.shape) or b_any( _", "else: x = _padding(x, padding, 0) else: assert len(base_shape) ==", "made a fix but not catched in CNTK 2.1 release.", "version cntk can't support input with variable # length. Will", "= floatx() if seed is None: # ensure that randomness", "shape is not supported now. ' 'Please provide fixed dimension", "== 0: prev_output = zeros_like(output) else: prev_output = outputs[-1] output", "x.eval() elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter): return x.value else:", "pattern[1] > 0: postfix_shape = list(base_shape) postfix_shape[axis] = pattern[1] postfix_shape", "* (kernel.shape[0] - 1) x = temporal_padding(x, (left_pad, 0)) padding", "C.alias(x, name=name) def _preprocess_conv2d_input(x, data_format): if data_format == 'channels_last': #", "any dev = C.device.use_default_device() if dev.type() == 0: warnings.warn( 'CNTK", "data_format == 'channels_first': xs.append(reshape(inputs[:, :, slice_row, slice_col], (-1, 1, feature_dim)))", "if isinstance(axis, tuple): axis = list(axis) if not isinstance(axis, list):", "\"\"\"Reset learning phase flag for cntk backend. 
\"\"\" global _LEARNING_PHASE", "value def clear_session(): \"\"\"Reset learning phase flag for cntk backend.", "tensor with ' 'shape `%s` contains non-specified dimension, ' 'which", "tf.batch_matmul as default axes = [len(x_shape) - 1, len(y_shape) -", "0)) return x def _get_dynamic_axis_num(x): if hasattr(x, 'dynamic_axes'): return len(x.dynamic_axes)", "else ( outputs[0], ) self.trainer = C.trainer.Trainer( outputs[0], criterion, [learner])", "raise ValueError('Unexpectedly found an instance of type `' + str(type(x))", "> 0 self.loss = outputs[0] # need group update by", "if len(y.shape) > 1 else 1) if len(y_shape) == 2:", "axis, keepdims=True) devs_squared = C.square(x - m) return mean(devs_squared, axis=axis,", "the second axis is static axis, CNTK will do unroll", "len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0,", "isinstance(inputs, (list, tuple)) feed_dict = {} for tensor, value in", "C.reduce_sum(any_matrix) else: return any_matrix def all(x, axis=None, keepdims=False): reduce_result =", "'The reshape did not take place.') return x return _reshape_batch(x,", "and bias_dims != dims: raise ValueError('Unexpected bias dimensions %d, '", "< 0 or index > 1: raise NotImplementedError new_shape =", "dim_ordering): if dim_ordering == 'channels_last': x = C.transpose(x, (1, 2,", "= tuple(prefix_shape) x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis) base_shape =", "defaultdict(int) # cntk doesn't support gradient as symbolic op, to", "using CNTK 2.0 GA as default.') return float(2.0) class ReshapeBatch(C.ops.functions.UserFunction):", "cntk_axis.append(x.dynamic_axes[dynamic_axis_index]) dynamic_axis_index += 1 else: cntk_axis.append(i - dynamic_axis_index) if dynamic_axis_index", "/ int return x def dot(x, y): if len(x.shape) >", "' + str(data_format)) padding = _preprocess_border_mode(padding) x = _preprocess_conv3d_input(x, data_format)", "else: x = getattr(C, reduce_fun_name)(x, axis) return x def _reshape_sequence(x,", "set_value(x, value): if (isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)): if isinstance(value,", "# CNTK expects `(depth, input_depth, rows, cols)`. kernel = C.transpose(kernel,", "i += 1 return C.splice(*slices, axis=axis) def repeat(x, n): #", "padding], output_shape=output_shape) return _postprocess_conv2d_output(x, data_format) def identity(x, name=None): if name", "None: shift = x # Compute true mean while keeping", "= C.device.use_default_device() if dev.type() == 0: warnings.warn( 'CNTK backend warning:", "in cntk_axes] end_index = [0 for _ in cntk_axes] strides", "len(outputs) > 0 self.loss = outputs[0] # need group update", "isinstance(x, C.cntk_py.Function): return x.eval() elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter):", "x_aggregate * weight # shape: batch, filters, output_length output =", "input_dict[argument] = feed_dict[argument] else: raise ValueError( 'CNTK backend: assign ops", "model and inputs.' 
% argument.name) # Some ops (like dropout)", "shape(beta)[0] == 1: beta = _reshape_dummy_dim(beta, [0]) return (x -", "time_axis) output, new_states = step_function( current, tuple(states) + tuple(constants)) if", "assert dilation_rate[0] == dilation_rate[1] assert strides == (1, 1), 'Invalid", "and num_time_step is not C.FreeDimension: final_output = _reshape_sequence(final_output, num_time_step) f_stats", "== list(range(ndim(x)))[:-1]: normalized = batch_normalization( x, mean, variant, beta, gamma,", "= floatx() return variable(np.eye(size), dtype, name) def zeros_like(x, dtype=None, name=None):", "new_states = step_function( current, tuple(states) + tuple(constants)) if getattr(output, '_uses_learning_phase',", "constants = [] if mask is not None: mask_shape =", "inputs in ' '`train_function`.' % argument.name) result = self.trainer.train_minibatch( input_dict,", "it in cntk now # return the same x to", "equal(x, y): return C.equal(x, y) def not_equal(x, y): return C.not_equal(x,", "Current cntk does not support shape like (1, batch). so", "from_logits=False): if from_logits: result = C.cross_entropy_with_softmax(output, target) # cntk's result", "output_rank=(len(y.shape) - 1) if len(y.shape) > 1 else 1) if", "' 'Please double check the model and inputs in '", "an optimizer update. # Returns A variable instance (with Keras", "= _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_prod') return _remove_dims(output,", "n_s final_output, final_states = _recurrence(rnn_inputs, states, mask) last_output = C.sequence.last(final_output)", "cntk only support calculate on float, do auto cast here", "= np.float32 else: dtype = _convert_string_dtype(dtype) if name is None:", "False v.constraint = constraint return v def bias_add(x, bias, data_format=None):", "an instance of type `' + str(type(x)) + '`. 
'", "not has_seq_axis(mask): if go_backwards: mask = reverse(mask, 1) if len(int_shape(mask))", "epsilon()) output = -target * C.log(output) - (1.0 - target)", "_padding(x, padding[2], 2) else: assert len(base_shape) == 5 if hasattr(C,", ">= 0 else len(shape) + 1 shape.insert(index, 1) new_shape =", "x / norm def hard_sigmoid(x): x = (0.2 * x)", "final_output, states def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False,", "return len(shape) def _prepare_name(name, default): prefix = '_'.join(NAME_SCOPE_STACK) if name", "num_time_step is not C.FreeDimension: final_output = _reshape_sequence(final_output, num_time_step) f_stats =", "in range(ndim): if shape[i] is None and dynamic_axis_index < nones:", "_padding(x, padding[0], 2) x = _padding(x, padding[1], 3) else: if", "version = version[:2] + version[2:].replace('.', '') try: return float(version) except:", "_padding(x, padding, 1) return x def _padding(x, pattern, axis): base_shape", "'avg': x = C.pooling( x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding]) else:", "elif dtype == 'float64': return np.float64 else: # cntk only", "== 2) outputs = result[1] for o in self.trainer_output: updated.append(outputs[o])", "= (1, bias.shape[0]) else: shape = bias.shape else: shape =", "# causal (dilated) convolution: left_pad = dilation_rate * (kernel.shape[0] -", "C.cntk_py.Function): return x.eval() elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter): return", "RNG seed = np.random.randint(10e3) if dtype is None: dtype =", "(-1, filters, output_row, output_col)) if data_format == 'channels_last': # shape:", "depth_factor, axis=2) output = repeat_elements(output, height_factor, axis=3) output = repeat_elements(output,", "sorted(_axis, reverse=True): result = C.reshape(result, shape=(), begin_axis=index, end_axis=index + 1)", "else: raise ValueError( 'CNTK backend: argument %s is not found", "arguments, device=None, outputs_to_retain=None): return None, C.cntk_py.Value(arguments.data()) def backward(self, state, root_gradients):", "bias_dims != dims: raise ValueError('Unexpected bias dimensions %d, ' 'expected", "if level < 0. or level >= 1: raise ValueError('CNTK", "so here we use # element_select approach as workaround. It", "axis to CNTK static axis. We may introduce this operation", "True return x def is_placeholder(x): \"\"\"Returns whether `x` is a", "1) else: target_shape.append(x_shape[axis]) broadcast_mean = C.reshape(mean, target_shape) broadcast_var = C.reshape(variant,", "static. 
If you want to run ' 'rnn with non-static", "in float, so don't need case from bool / int", "_convert_dtype_string(x.dtype) def zeros(shape, dtype=None, name=None): if dtype is None: dtype", "mask = reverse(mask, 1) if len(int_shape(mask)) == 2: mask =", "'CNTK backend: when constructing trainer, ' 'found gradient node `%s`", "= tuple(cntk_shape) if dynamic_axis_num > len(cntk_shape): raise ValueError('CNTK backend: creating", "variables): # cntk does not support gradients as symbolic op,", "this global # map to keep the mapping from grad", "`%s` which is not ' 'related to any parameters in", "return len(x.dynamic_axes) else: return 0 def _contain_seqence_axis(x): if _get_dynamic_axis_num(x) >", "list(int_shape(x)) tmp_shape[1] = time_step return reshape(x, tmp_shape) def local_conv1d(inputs, kernel,", "1 - nones if nones > 0 else 1 if", "list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]]) else: x = _padding(x, padding[0],", "if len(x.shape) > 2 or len(y.shape) > 2: y_shape =", "_LEARNING_PHASE if value not in {0, 1}: raise ValueError('CNTK Backend:", "axis, keepdims) def logsumexp(x, axis=None, keepdims=False): return log(sum(exp(x), axis=axis, keepdims=keepdims))", "p != C.InferredDimension and p != C.FreeDimension: return False return", "3) x = _padding(x, padding[2], 4) else: if num_dynamic_axis >", "= {} for tensor, value in zip(self.placeholders, inputs): # cntk", "batch axis. We may introduce this operation in CNTK native", "2 or len(y.shape) > 2: y_shape = int_shape(y) if len(y_shape)", "num_dynamic_axis = _get_dynamic_axis_num(x) if num_dynamic_axis == 1 and len(shape) >", "y) def greater_equal(x, y): return C.greater_equal(x, y) def less(x, y):", "target dimensions are not supported. ' + 'Expected: None, int,", "raise ValueError('Unknown data_format ' + str(data_format)) padding = _preprocess_border_mode(padding) x", "= _padding(x, padding[0], 0) x = _padding(x, padding[1], 1) else:", "dim = np.prod(x.shape) x = C.reshape(x, (-1,)) x._keras_shape = (None,", "`x` is a placeholder. # Arguments x: A candidate placeholder.", "new_states = [C.element_select(m, n, s) for n, s in zip(new_states,", "of then and' ' else expressions. 
ndim(condition)=' + str(ndim_cond) +", "Backend: Invalid dropout level %s, ' 'must be in interval", "x._uses_learning_phase = False x._cntk_placeholder = True return x def is_placeholder(x):", "zeros_like(x, dtype=None, name=None): return x * 0 def ones_like(x, dtype=None,", "[0, 0]]) else: x = _padding(x, padding[0], 0) x =", "= 0 while dynamic_axis_index < nones: cntk_axis[i] = x.dynamic_axes[dynamic_axis_index] i", "num_static_element) return C.cntk_py.Value( grad_array_view.as_shape( (num_old_batch,) + self.from_shape)) class ConvertToBatch(C.ops.functions.UserFunction): \"\"\"Converts", "% ndim) if _axis[i] is not None: _axis[i] = cntk_axis[_axis[i]]", "is inferred dimension, # we can't figure out how to", "= {} for argument in self.loss.arguments: if argument in feed_dict:", "C.InferredDimension for _ in x.shape) or b_any( _ == C.FreeDimension", "_get_dynamic_axis_num(x) def _reduce_on_axis(x, axis, reduce_fun_name): if isinstance(axis, list): for a", "not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format))", "hasattr(placeholder, 'shape'): num_dynamic = get_num_dynamic_axis(placeholder) input_shape = input.shape[num_dynamic:] placeholder_shape =", "predictions.shape[-1]) result = C.classification_error(predictions, _targets, topN=k) return 1 - C.reshape(result,", "tanh(x): return C.tanh(x) def _static_rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None,", "for metrics more # than 2, need manual eval elif", "place holder place_holders = [C.placeholder(dynamic_axes=x.dynamic_axes) for _ in states] past_values", "ValueError('CNTK backend: metrics argument %s ' 'is not found in", "len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[list(padding[0]),", "# TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3) #", "node `%s` which is not ' 'related to any parameters", "dynamic axis, ' 'which is not supported. Please do permute", "C.Axis.all_axes() return _axis def _reshape_dummy_dim(x, axis): shape = list(x.shape) _axis", "_reduce_on_axis(x, axis, 'reduce_mean') return _remove_dims(output, axis, keepdims) def any(x, axis=None,", "' 'which is not supported. Please do permute ' 'on", "axis in _axes: variance_mean = C.reduce_mean(variance_mean, axis=axis) variance = C.minus(variance_mean,", "outputs[0] # need group update by gradient place holder u_ops", "relu(x, alpha=0., max_value=None): if alpha != 0.: negative_part = C.relu(-x)", "C.sequence.unpack(final_output, 0, no_mask_output=True) if num_time_step is not None and num_time_step", "axis=-1)), axis=C.Axis.all_axes()) def argmax(x, axis=-1): axis = [axis] axis =", "C.softmax(x, axis=axis) def softplus(x): return C.softplus(x) def softsign(x): return x", "pool_size, strides, auto_padding=[padding]) else: raise ValueError('Invalid pooling mode: ' +", "is None: raise ValueError('CNTK Backend: the input of static rnn", "t in tuples: x = t[0] value = t[1] if", "here to mapping the correct axis. 
Will remove this tricky", "bias_dims == 1: shape = (bias.shape[0], 1, 1, 1) else:", "ValueError('CNTK backend: `count_params` with dynamic ' 'shape is not supported.", "x._keras_shape = shape x._uses_learning_phase = False x._cntk_placeholder = True return", "better performance.') # A learning phase is a bool tensor", "isinstance(axis, list): # sequence axis is removed by default, so", "if hasattr(C, 'pad'): x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), list(padding[2]), [0,", "raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format)", "_ in x.shape) or b_any( _ == C.FreeDimension for _", "x = _padding(x, padding, 0) else: assert len(base_shape) == 3", "[C.InferredDimension if _ is None else _ for _ in", "ones_like(x) else: gamma = ones_like(beta) if beta is None: if", "- 1 while i >= 0: current = C.ops.slice(inputs, time_axis,", "dtype is None: dtype = floatx() ctype = _convert_string_dtype(dtype) return", "x def dot(x, y): if len(x.shape) > 2 or len(y.shape)", "+ 1 shape.insert(index, 1) new_shape = shape[nones:] new_shape = tuple(", "_padding(x, padding[0], 1) x = _padding(x, padding[1], 2) x =", "updated def function(inputs, outputs, updates=[], **kwargs): return Function(inputs, outputs, updates=updates,", "tuple(shape) x = C.convolution_transpose( kernel, x, strides, auto_padding=[ False, padding,", "_reshape_dummy_dim(mean, [0]) if ndim(var) == ndim(x) and shape(var)[0] == 1:", "ValueError('Multiple target dimensions are not supported. ' + 'Expected: None,", "for a in axis: if isinstance(a, C.Axis) \\ and a", "'' if isinstance( value, C.variables.Constant) or isinstance( value, C.variables.Parameter): value", "fix, ignore all the . except the first one. if", "2, 3, 0)) return x def _get_dynamic_axis_num(x): if hasattr(x, 'dynamic_axes'):", "= inputs self.trainer = None self.unrelated_updates = None self.updates =", "dummy axis. if ndim(mean) == ndim(x) and shape(mean)[0] == 1:", "else: u = C.assign(update[0], update[1]) else: u = update if", "f in outputs[2:]] self.metrics_func = C.combine(self.metrics_outputs) else: self.metrics_func = None", "shape[i] == -1: i += 1 else: break shape =", "to evaluate them.from # But the assign ops won't be", "int_shape(kernel) output_length, feature_dim, filters = kernel_shape xs = [] for", "'CNTK backend warning: CNTK version not detected. 
' 'Will using", "tensor return _LEARNING_PHASE if _LEARNING_PHASE in {0, 1} else _LEARNING_PHASE_PLACEHOLDER", "len(shape) > 0: value = value.astype(dtype) # TODO: remove the", "= _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_sum') return _remove_dims(output,", "place.') return x return _reshape_batch(x, shape) else: # no collapse,", "= [] for l_s, i_s in zip(last_states, initial_states): if _get_dynamic_axis_num(i_s)", "for o in self.metrics_outputs: updated.append(v) if self.unrelated_updates is not None:", "(bias.shape[0], 1, 1) else: shape = (bias.shape[2],) + bias.shape[:2] elif", "= [] for g in grads: if g in grad_parameter_dict:", "else C.InferredDimension cntk_shape = [dynamic_dimension if s is None else", "{0, 1}: raise ValueError('CNTK Backend: Set learning phase ' 'with", "execute=lambda x: print(message))) def batch_set_value(tuples): for t in tuples: x", "_preprocess_conv2d_input(x, data_format) if pool_mode == 'max': x = C.pooling( x,", "(samples, input_depth, conv_dim1, conv_dim2, conv_dim3) # TF input shape: (samples,", "when cntk supports int32, int64 # https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter dtype = 'float32'", "return Function(inputs, outputs, updates=updates, **kwargs) def temporal_padding(x, padding=(1, 1)): assert", "strides, auto_padding=[ False, padding, padding, padding], output_shape=output_shape) return _postprocess_conv3d_output(x, data_format)", "using the workaround # here to mapping the correct axis.", "1: return res else: return C.element_select(C.greater(x, 0), res, alpha *", "x) output = _reduce_on_axis(x, axis, 'reduce_max') return _remove_dims(output, axis, keepdims)", "if name is None: name = '' if isinstance( value,", "data_format) def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): raise", "in outputs[2:]] self.metrics_func = C.combine(self.metrics_outputs) else: self.metrics_func = None @staticmethod", "+ str(padding)) return padding def _postprocess_conv2d_output(x, data_format): if data_format ==", "output = repeat_elements(x, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3)", "0: _axis[i] = (a % ndim) if _axis[i] is not", "False, padding, padding, padding], output_shape=output_shape) return _postprocess_conv3d_output(x, data_format) def pool2d(x,", "(isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)): result.append(x.value) else: result.append(eval(x)) return result", "Returns Boolean. \"\"\" return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder def is_keras_tensor(x):", "== 'valid': padding = False else: raise ValueError('Invalid border mode:", "def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if", "len(shape) if _ < 0 else _ for _ in", "-sum(target * C.log(output), axis=-1) def sparse_categorical_crossentropy(target, output, from_logits=False): target =", "C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)), (-1, 1) + depthwise_kernel.shape[2:]) pointwise_kernel", "expand_dims(outputs[0], 1) last_output = outputs[0] while i < len(outputs): #", "is None: name = '' if isinstance( value, C.variables.Constant) or", "output = repeat_elements(x, depth_factor, axis=2) output = repeat_elements(output, height_factor, axis=3)", "tensor, value in zip(self.placeholders, inputs): # cntk only support calculate", "backend: the permute pattern %s ' 'requested permute on dynamic", "is a placeholder. 
# Arguments x: A candidate placeholder. #", "is not ' 'related to any parameters in the model.", "x, strides, auto_padding=[ False, padding, padding], output_shape=output_shape) return _postprocess_conv2d_output(x, data_format)", "_normalize_axis(reduction_axes, x)) if sorted(reduction_axes) == list(range(ndim(x)))[:-1]: normalized = batch_normalization( x,", "the placeholder for dynamic learning phase _LEARNING_PHASE_PLACEHOLDER = C.constant(shape=(), dtype=np.float32,", "return C.user_function(ReshapeBatch(x, shape[1:])) def _get_cntk_version(): version = C.__version__ if version.endswith('+'):", "# scale preds so that the class probas of each", "1, 1, 1) else: shape = (bias.shape[3],) + bias.shape[:3] elif", "return x def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1,", "(axes, axes) if axes is None: # behaves like tf.batch_matmul", "a ' 'static length for your sequences.') rnn_inputs = inputs", "1), (1, 1)), data_format=None): assert len(padding) == 2 assert len(padding[0])", "if data_format == 'channels_last': x = C.swapaxes(x, 0, 1) kernel", "== 2: if data_format == 'channels_first': if bias_dims == 1:", "_reshape_batch(x, shape) else: # no collapse, then first need to", "min_value is None: min_value = -np.inf return C.clip(x, min_value, max_value)", "else: raise NotImplementedError def print_tensor(x, message=''): return C.user_function( LambdaFunc(x, when=lambda", "len(n))]) + n if len(n) != len(shape): raise NotImplementedError i", "= squeeze(mask_slice, time_axis) if len(outputs) == 0: prev_output = zeros_like(output)", "if isinstance(pattern, list): current_layout = [i for i in range(dims)]", "so that the class probas of each sample sum to", "init=C.initializer.normal( scale=scale, seed=seed), dtype=dtype, name=name) def random_normal(shape, mean=0.0, stddev=1.0, dtype=None,", "i += 1 else: break shape = tuple([-1 for _", "ValueError( 'CNTK backend: when constructing trainer, ' 'found gradient node", "new_shape = new_shape[num_dynamic_axis:] new_shape = [C.InferredDimension if _ is None", "C.ops.slice(x, axis, i, i + 1) for _ in range(rep):", "- 1: x = C.swapaxes(x, i, i + 1) i", "ndim(x) and shape(gamma)[0] == 1: gamma = _reshape_dummy_dim(gamma, [0]) if", "data_format=None, dilation_rate=(1, 1, 1)): if data_format is None: data_format =", "else: f_stats.append(l_s) last_output._uses_learning_phase = uses_learning_phase return last_output, final_output, f_stats def", "isinstance(x, C.variables.Constant)): result.append(x.value) else: result.append(eval(x)) return result def set_value(x, value):", "f_stats.append(l_s) last_output._uses_learning_phase = uses_learning_phase return last_output, final_output, f_stats def has_seq_axis(x):", "cntk does not support shape like (1, batch). 
so using", "_ in x.shape): warnings.warn( 'Warning: CNTK backend does not support", "C.constant(0, shape=v.shape, name='keras_grad_placeholder') grads.append(g) grad_parameter_dict[g] = v return grads def", "C.cntk_py.Function) is False: x = x() if callable(alt) and isinstance(alt,", "not None: _axis[i] = cntk_axis[_axis[i]] else: if _axis is None:", "def get_uid(prefix=''): _UID_PREFIXES[prefix] += 1 return _UID_PREFIXES[prefix] def learning_phase(): #", "C.splice(*temp, axis=index) def tanh(x): return C.tanh(x) def _static_rnn(step_function, inputs, initial_states,", "1) mask_slice = squeeze(mask_slice, time_axis) if len(outputs) == 0: prev_output", "= [_ + len(shape) if _ < 0 else _", "None: max_value = np.inf if min_value is None: min_value =", "' + str(data_format)) padding = _preprocess_border_mode(padding) strides = strides pool_size", "= C.pad(x, pattern=[padding, (0, 0)]) else: x = _padding(x, padding,", "- dynamic_axis_index) if dynamic_axis_index < nones: i = 0 while", "False and isinstance(axis, list): # sequence axis is removed by", "with 1, it is not needed # in cntk, need", "ignore all the . except the first one. if len(version)", "def argmax(x, axis=-1): axis = [axis] axis = _normalize_axis(axis, x)", "max_value = np.inf if min_value is None: min_value = -np.inf", "y, len(y_shape) - 1) else: return C.times(x, y) def batch_dot(x,", "_postprocess_conv3d_output(x, data_format) def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1), padding='valid',", "= value.astype(np.float32) if tensor == _LEARNING_PHASE_PLACEHOLDER: _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(value) else:", "== 'channels_last': if bias_dims == 1: shape = (1, bias.shape[0])", "feed_dict = {} for tensor, value in zip(self.placeholders, inputs): #", "permute pattern %s ' 'requested permute on dynamic axis, '", "contains non-specified dimension, ' 'which is not supported. Please give", "pattern[0] > 0: prefix_shape = list(base_shape) prefix_shape[axis] = pattern[0] prefix_shape", "shape=()) def conv2d_transpose(x, kernel, output_shape, strides=(1, 1), padding='valid', data_format=None): if", "padding, padding], output_shape=output_shape) return _postprocess_conv2d_output(x, data_format) def identity(x, name=None): if", "def maximum(x, y): return C.element_max(x, y) def minimum(x, y): return", "= _padding(x, padding[0], 2) x = _padding(x, padding[1], 3) else:", "def has_seq_axis(x): return hasattr(x, 'dynamic_axes') and len(x.dynamic_axes) > 1 def", "a in axis: if isinstance(a, C.Axis): has_seq = True break", "maxval=1.0, dtype=None, seed=None): for _ in shape: if _ is", "_remove_dims(output, axis, keepdims) def prod(x, axis=None, keepdims=False): axis = _normalize_axis(axis,", "self.trainer_output = tuple([f.output for f in criterion]) elif len(u_ops) >", "' + str(padding)) return padding def _postprocess_conv2d_output(x, data_format): if data_format", "- output) return output def get_variable_shape(x): return int_shape(x) def update(x,", "= False x._cntk_placeholder = True return x def is_placeholder(x): \"\"\"Returns", "x = C.input( shape=cntk_shape, dtype=_convert_string_dtype(dtype), is_sparse=sparse, name=name) x._keras_shape = shape", "padding, padding]) return _postprocess_conv3d_output(x, data_format) def conv3d_transpose(x, kernel, output_shape, strides=(1,", "instance of type `' + str(type(x)) + '`. 
' 'Expected", "C.parameter(shape=shape, init=value, dtype=dtype, name=_prepare_name(name, 'variable')) v._keras_shape = v.shape v._uses_learning_phase =", "new_states i -= 1 else: i = 0 while i", "seed is None: # ensure that randomness is conditioned by", "axis output_shape = output_shape[1:] # in keras2, need handle output", "_preprocess_border_mode(padding) strides = (1,) + strides # cntk output_shape does", "len(x.shape) > 2 or len(y.shape) > 2: y_shape = int_shape(y)", "normalized, mean, variant def _moments(x, axes=None, shift=None, keep_dims=False): _axes =", "warning: GPU is not detected. ' 'CNTK\\'s CPU version is", "(1, 1, 1, bias.shape[0]) else: shape = bias.shape elif dims", "y, output_rank=(len(y.shape) - 1) if len(y.shape) > 1 else 1)", "not take variable length inputs. Please ' 'pass inputs that", "for _ in range(ndim)]) dynamic_dimension = C.FreeDimension if _get_cntk_version() >=", "now. ' 'Please provide fixed dimension ' 'instead of `None`.')", "dynamic_dimension = C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension cntk_shape", "0 def ones_like(x, dtype=None, name=None): return zeros_like(x) + 1 def", "randomness op with ' 'dynamic shape is not supported now.", "== 'channels_last': x = C.transpose(x, (1, 2, 0)) return x", "= C.input( shape=cntk_shape, dtype=_convert_string_dtype(dtype), is_sparse=sparse, name=name) x._keras_shape = shape x._uses_learning_phase", "filters, output_length output = sum(output, axis=3) # shape: batch, filters,", "from_logits=False): target = C.one_hot(target, output.shape[-1]) target = C.reshape(target, output.shape) return", "updates=[], **kwargs): return Function(inputs, outputs, updates=updates, **kwargs) def temporal_padding(x, padding=(1,", "for axis in _axes: shift = C.reduce_mean(shift, axis=axis) shift =", "classification_error(target, output, axis=-1): return C.ops.reduce_mean( C.equal( argmax( output, axis=-1), argmax(", "update by gradient place holder u_ops = [] unrelated_updates =", "if num_dynamic_axis >= len(shape): i = 0 while i <", "C.ops.element_select( mask_slice, n_s, s)) new_states = return_states outputs.append(output) states =", "for _ in cntk_axes] end_index = [0 for _ in", "1, feature_dim))) x_aggregate = concatenate(xs, axis=1) # transpose kernel to", "assert len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x,", "else: raise ValueError('Invalid pooling mode: ' + str(pool_mode)) return _postprocess_conv3d_output(x,", "= '_'.join(NAME_SCOPE_STACK) if name is None or name == '':", "None: dtype = floatx() for _ in shape: if _", "C.less_equal(x, y) def maximum(x, y): return C.element_max(x, y) def minimum(x,", "+ str(data_format)) dims = len(x.shape) if dims > 0 and", "p != C.FreeDimension: return False return True def __call__(self, inputs):", "dtype = 'float32' if 'int' in str(dtype) else dtype v", "can't figure out how to repeat it in cntk now", "{} for tensor, value in zip(self.placeholders, inputs): # cntk only", "output_col = output_shape kernel_shape = int_shape(kernel) _, feature_dim, filters =", "hasattr(C.sequence, reduce_fun_name): x = getattr(C.sequence, reduce_fun_name)(x, a) else: x =", "mask = expand_dims(mask) mask = C.to_sequence_like(mask, rnn_inputs) states = tuple(initial)", "break shape = tuple([-1 for _ in range(num_dynamic_axis - i)])", "= getattr(C, reduce_fun_name)(x, axis) return x def _reshape_sequence(x, time_step): tmp_shape", "mean, variant = _moments(x, _normalize_axis(reduction_axes, x)) if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:", "None: new_states = 
\"\"\" def __init__(self, input, batch_size, name='convert_to_static'): super(ConvertToStatic, self).__init__([input], as_numpy=False,", "call # \"forward\" method to let cntk know we want", "Backend: tensor with keras shape: `%s` has ' '%d cntk", "for _ in x.shape) or b_any( _ == C.FreeDimension for", "`None`.') return random_uniform_variable(shape, minval, maxval, dtype, seed) def random_uniform_variable(shape, low,", "def flatten(x): return reshape(x, (-1,)) def reshape(x, shape): shape =", "Backend: Invalid data_format:', data_format) def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):", "= ( outputs[0], outputs[1]) if len(outputs) > 1 else (", "_get_cntk_version() >= 2.2: const_a = C.unpack_batch(x) const_a = C.reshape(const_a, shape)", "padding='valid', data_format=None, dilation_rate=1): if data_format is None: data_format = image_data_format()", "keep_dims=False): _axes = tuple(axes) if shift is None: shift =", "0, 1)) return kernel def _preprocess_border_mode(padding): if padding == 'same':", "< nones: i = 0 while dynamic_axis_index < nones: cntk_axis[i]", "axes is None: # behaves like tf.batch_matmul as default axes", "raise ValueError('Unknown data_format ' + str(data_format)) dims = len(x.shape) if", "in enumerate(_axis): if a is not None and a <", "assert len(padding) == 3 assert len(padding[0]) == 2 assert len(padding[1])", "name string for the tensor. constraint: Optional projection function to", "1)) # Shape: (batch, filters, output_length, input_length * kernel_size) output", "crash. # We have made a fix but not catched", "be less' ' than or equal to rank of then", "1, len(y_shape) - 2] if b_any([isinstance(a, (list, tuple)) for a", "isinstance(axis, tuple): axis = list(axis) if not isinstance(axis, list): axis", "gamma = C.reduce_mean(gamma, axis - 1) beta = C.reduce_mean(beta, axis", "when=lambda arg: True, execute=lambda arg: print(arg), name=''): self.when = when", "reduce_fun_name)(x, a) else: x = getattr(C, reduce_fun_name)(x, axis) return x", "# shape: batch, filters, row, col output = reshape(output, (-1,", "reshape(x, shape): shape = tuple([C.InferredDimension if _ == C.FreeDimension else", "new_shape) def permute_dimensions(x, pattern): dims = len(int_shape(x)) num_dynamic_axis = _get_dynamic_axis_num(x)", "return C.swapaxes(x, 0, 1) def gather(reference, indices): # There is", "width_factor, axis=3) return output else: raise ValueError('CNTK Backend: Invalid data_format:',", "variable(value=binomial, dtype=dtype) def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): for _", "`None`.') return np.prod(int_shape(x)) def cast(x, dtype): # cntk calculate everything", "' + str(data_format)) if padding == 'causal': # causal (dilated)", "False, padding, padding]) return _postprocess_conv2d_output(x, data_format) def separable_conv1d(x, depthwise_kernel, pointwise_kernel,", "= cntk_shape[dynamic_axis_num:] x = C.input( shape=cntk_shape, dtype=_convert_string_dtype(dtype), is_sparse=sparse, name=name) x._keras_shape", "output = C.sigmoid(output) output = C.clip(output, epsilon(), 1.0 - epsilon())", "from contextlib import contextmanager import warnings C.set_global_option('align_axis', 1) b_any =", "beta def concatenate(tensors, axis=-1): if len(tensors) == 0: return None", "% pattern) axis = list(pattern) axis = axis[num_dynamic_axis:] axis =", "_padding(x, padding[1], 2) x = _padding(x, padding[2], 3) return x", "= tuple(shape) x = C.convolution_transpose( kernel, x, strides, auto_padding=[ False,", "by default, so don't 
need reshape on it reduce_axes =", "= [] for s, n_s in zip(states, new_states): return_states.append( C.ops.element_select(", "devs_squared = C.square(x - m) return mean(devs_squared, axis=axis, keepdims=keepdims) def", "len(u.arguments) == 0: u_ops.append(u) else: unrelated_updates.append(u) update_func = C.combine([u.output for", "in range(output_col): slice_row = slice(i * stride_row, i * stride_row", "== 2 assert len(padding[2]) == 2 if data_format is None:", "return kernel def _postprocess_conv3d_output(x, dim_ordering): if dim_ordering == 'channels_last': x", "1) # remove dummy dimension current = squeeze(current, 1) output,", "evaluate them.from # But the assign ops won't be executed", "len(y_shape) - 2] if b_any([isinstance(a, (list, tuple)) for a in", "float, do auto cast here if (hasattr(value, 'dtype') and value.dtype", "if _ is None else _ for _ in new_shape]", "return x nones = _get_dynamic_axis_num(x) for _ in sorted(_axis, reverse=True):", "ValueError('Rank of condition should be less' ' than or equal", "return result def squeeze(x, axis): if isinstance(axis, tuple): axis =", "if padding == 'same': padding = True elif padding ==", "self.trainer_output: updated.append(outputs[o]) if self.metrics_func is not None: input_dict = {}", "'dynamic_axes'): dynamic_shape = [None for a in x.dynamic_axes] shape =", "C.variables.Constant) or isinstance(x, C.variables.Parameter): return x.value else: raise ValueError('CNTK Backend:", "if s is None else s for s in shape]", "kernel_size[0]) xs.append(reshape(inputs[:, slice_length, :], (-1, 1, feature_dim))) x_aggregate = concatenate(xs,", "`None`.') size *= _ binomial = np.random.binomial(1, p, size).astype(dtype).reshape(shape) return", "else_expression) def elu(x, alpha=1.): res = C.elu(x) if alpha ==", "name=None): if dtype is None: dtype = floatx() if shape", "isinstance(n, list): n = tuple(n) shape = int_shape(x) num_dynamic_axis =", "= C.transpose(x, (2, 0, 1)) return x def _preprocess_conv2d_kernel(kernel, data_format):", "= C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1])]) else: x", "kernel, x, strides, auto_padding=[ False, padding, padding, padding], output_shape=output_shape) return", "feature_dim))) x_aggregate = concatenate(xs, axis=1) # transpose kernel to put", "are not supported. ' + 'Expected: None, int, (int, int),", "int64 # https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter dtype = 'float32' if 'int' in str(dtype)", "p_list.append(grad_parameter_dict[g]) u_list.append(g) else: raise ValueError( 'CNTK backend: when constructing trainer,", "'Need at least rank 3 to run RNN.' % dims)", "def in_train_phase(x, alt, training=None): global _LEARNING_PHASE if training is None:", "not support ' 'collapse of batch axis with inferred dimension.", "x = C.convolution(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False]) return _postprocess_conv2d_output(x,", "implementation later. # Arguments inputs: a cntk variable (parameter/constant) name:", "= C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1])]) else: x = _padding(x,", "supported. Please give fixed ' 'dimension to enable padding.' 
%", "handle output shape in different format if data_format == 'channels_last':", "axis, 'reduce_prod') return _remove_dims(output, axis, keepdims) def logsumexp(x, axis=None, keepdims=False):", "output = _reduce_on_axis(x, axis, 'reduce_mean') return _remove_dims(output, axis, keepdims) def", "in u_ops]) grads = update_func.find_all_with_name('keras_grad_placeholder') u_list = [] p_list =", "'CNTK backend: assign ops argument %s ' 'is not found", "1.0 - epsilon()) return -sum(target * C.log(output), axis=-1) def sparse_categorical_crossentropy(target,", "@staticmethod def _is_input_shape_compatible(input, placeholder): if hasattr(input, 'shape') and hasattr(placeholder, 'shape'):", "to repeat it in cntk now # return the same", "shape is `%s`. Currently ' 'CNTK can not take variable", "batch axis with inferred dimension. ' 'The reshape did not", "= 0 for i in range(ndim): if shape[i] is None", ") return C.reshape(result, ()) else: # scale preds so that", "== C.InferredDimension: dims -= 1 bias_dims = len(bias.shape) if bias_dims", "value = value.value # we don't support init parameter with", "const def random_binomial(shape, p=0.0, dtype=None, seed=None): # use numpy workaround", "target_shape) broadcast_var = C.reshape(variant, target_shape) broadcast_gamma = C.reshape(gamma, target_shape) broadcast_beta", "dilated convolution' x = C.convolution( kernel, x, strides=dilation_rate[0], auto_padding=[ False,", "value) _LEARNING_PHASE = value def clear_session(): \"\"\"Reset learning phase flag", "at least ' '%d dimensions are needed.' % (len(cntk_shape, dynamic_axis_num)))", "= [C.element_select(m, n, s) for n, s in zip(new_states, past_values)]", "name='keras_grad_placeholder') grads.append(g) grad_parameter_dict[g] = v return grads def equal(x, y):", "is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) if", "def resize_images(x, height_factor, width_factor, data_format): if data_format == 'channels_first': output", "= None @staticmethod def _is_input_shape_compatible(input, placeholder): if hasattr(input, 'shape') and", "this tricky after we add support # in native cntk", "= _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_max') return _remove_dims(output,", "index = axis if axis >= 0 else len(shape) +", "for _ in new_shape]) result = C.reshape(x, new_shape) if index", "C.FreeDimension: final_output = _reshape_sequence(final_output, num_time_step) f_stats = [] for l_s,", "0.0, max_value) if alpha != 0.: x -= alpha *", "avoid numerical instability with epsilon clipping output = C.clip(output, epsilon(),", "x.name return C.alias(x, name=name) def _preprocess_conv2d_input(x, data_format): if data_format ==", "in x.shape): warnings.warn( 'Warning: CNTK backend does not support '", "if 'int' in str(dtype) else dtype v = C.parameter(shape=shape, init=value,", "not None and a < 0: _axis[i] = (a %", "in self.metrics_outputs: updated.append(v) if self.unrelated_updates is not None: input_dict =", "= slice(i * stride_row, i * stride_row + kernel_size[0]) slice_col", "C.variables.Parameter) or isinstance( x, C.variables.Constant): return x.value else: return eval(x)", "= output_shape[3] shape[1] = output_shape[0] shape[2] = output_shape[1] shape[3] =", "for n, s in zip(new_states, past_values)] n_s = [] for", "not 0 or 1, return dynamic learning phase tensor return", "assert strides == (1, 1), 'Invalid strides for dilated convolution'", "True else alt else: result = C.element_select(training, x, alt) result._uses_learning_phase", "None or name == '': return prefix 
+ '/' +", "else: non_dyn_shape.append(shape[i + num_dynamic]) return shape[:num_dynamic] + non_dyn_shape def is_sparse(tensor):", "have a static shape.' % (str(tensor.shape), str(value.shape))) feed_dict[tensor] = value", "i += 1 dynamic_axis_index += 1 while i < len(cntk_axis):", "have # perf issue, will resolve it later with cntk", "value else: raise NotImplementedError def stop_gradient(variables): if isinstance(variables, (list, tuple)):", "axis is not in shape, # so just flatten all", "return grads def equal(x, y): return C.equal(x, y) def not_equal(x,", "+ bias.shape[:2] elif data_format == 'channels_last': if bias_dims == 1:", "epsilon()) return -sum(target * C.log(output), axis=-1) def sparse_categorical_crossentropy(target, output, from_logits=False):", "separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): raise NotImplementedError def", "slice_length, :], (-1, 1, feature_dim))) x_aggregate = concatenate(xs, axis=1) #", "__init__(self, inputs, outputs, updates=[], **kwargs): self.placeholders = inputs self.trainer =", "dropout) won't be applied during \"eval\" in cntk. # They", "+= 1 return x def _normalize_axis(axis, x): shape = int_shape(x)", "outputs.append(output) states = new_states[:len(states)] i += 1 i = 1", "here if (hasattr(value, 'dtype') and value.dtype != np.float32 and value.dtype", "result = self.trainer.train_minibatch( input_dict, self.trainer_output) assert(len(result) == 2) outputs =", "output_shape[2] output_shape = tuple(shape) x = C.convolution_transpose( kernel, x, strides,", "== C.InferredDimension for _ in x.shape) or b_any( _ ==", "need reshape on it reduce_axes = [] for a in", "def cos(x): return C.cos(x) def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):", "_preprocess_conv2d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = (1,) + strides", "need_convert: if go_backwards: rnn_inputs = reverse(rnn_inputs, 1) rnn_inputs = C.to_sequence(rnn_inputs)", "return last_output, final_output, states def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None,", "in x.dynamic_axes] shape = tuple(dynamic_shape) + shape return shape def", "len(x.shape) if dims > 0 and x.shape[0] == C.InferredDimension: dims", "in range(dims)] else: current_layout = tuple([i for i in range(dims)])", "root_gradients.shape()[0] * np.prod(np.asarray(self.target_shape)) num_static_element = np.prod(np.asarray(self.from_shape)) num_old_batch = int(num_element /", "raise ValueError('CNTK Backend: Unsupported dtype: %s. 
' 'CNTK only supports", "cntk_axes, begin_index, end_index, strides) def _reshape_batch(x, shape): # there is", "C.unpack_batch(x) const_a = C.reshape(const_a, shape) return C.to_batch(const_a) else: return C.user_function(ReshapeBatch(x,", "np.prod(x.shape) x = C.reshape(x, (-1,)) x._keras_shape = (None, dim) return", "= np.asarray(value) if isinstance(x, C.variables.Parameter): x.value = value else: raise", "= C.convolution( kernel, x, strides, auto_padding=[ False, padding, padding, padding])", "support calculate on float, do auto cast here if (hasattr(value,", "update_func) criterion = ( outputs[0], outputs[1]) if len(outputs) > 1", "uses_learning_phase = False if dims < 3: raise ValueError('CNTK Backend:", "repeat_elements(x, height_factor, axis=1) output = repeat_elements(output, width_factor, axis=2) return output", "LambdaFunc(C.ops.functions.UserFunction): def __init__(self, arg, when=lambda arg: True, execute=lambda arg: print(arg),", "= eval(value) shape = value.shape if hasattr(value, 'shape') else ()", "C.greater_equal(x, y) def less(x, y): return C.less(x, y) def less_equal(x,", "as_numpy=False) return updated def function(inputs, outputs, updates=[], **kwargs): return Function(inputs,", "= C.times(x, y, output_rank=(len(y.shape) - 1) if len(y.shape) > 1", "for _ in shape]) if isinstance(x, C.variables.Parameter): return C.reshape(x, shape)", "= tuple(n) shape = int_shape(x) num_dynamic_axis = _get_dynamic_axis_num(x) # Padding", "C.variables.Parameter): return C.reshape(x, shape) else: num_dynamic_axis = _get_dynamic_axis_num(x) if num_dynamic_axis", "but input shape is `%s`. Currently ' 'CNTK can not", "perm=permutation) return C.times(x, y, len(y_shape) - 1) else: return C.times(x,", "= squeeze(current, 1) output, new_states = step_function( current, tuple(states) +", "if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0: return C.reduce_sum(all_matrix)", "name: Optional name string for the tensor. constraint: Optional projection", "the dims for proper broadcasting. for axis in _axes: shift", "array, initial value of the tensor. dtype: Tensor type. name:", "else: u = update if len(u.arguments) == 0: u_ops.append(u) else:", "' 'instead of `None`.') # how to apply mean and", "len(base_shape) == 5 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0,", "length inputs. Please ' 'pass inputs that have a static", "* y, axis=axes[0], keepdims=True) return result if axes[0] == 1", "mapping the correct axis. Will remove this tricky after we", "grad_parameter_dict if isinstance(variables, list) is False: variables = [variables] grads", "4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]),", "placeholder_shape = placeholder.shape for i, p in zip(input_shape, placeholder_shape): if", "x: True, execute=lambda x: print(message))) def batch_set_value(tuples): for t in", "groups=x.shape[0]) else: if dilation_rate[0] != dilation_rate[1]: raise ValueError('CNTK Backend: non-square", "that the class probas of each sample sum to 1", "ValueError('Unexpected bias dimensions %d, ' 'expected 1 or %d dimensions'", "conditioned by the Numpy RNG seed = np.random.randint(10e3) if dtype", "= _normalize_axis(axis, x) output = C.ops.argmin(x, axis=axis[0]) return _reshape_dummy_dim(output, axis)", "C.transpose(kernel, (3, 2, 0, 1)) return kernel def _preprocess_border_mode(padding): if", "A candidate placeholder. # Returns Boolean. 
\"\"\" return hasattr(x, '_cntk_placeholder')", "= outputs[i] i += 1 last_output._uses_learning_phase = uses_learning_phase return last_output,", "C.convolution(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[False, padding, padding], groups=x.shape[0]) return _postprocess_conv2d_output(x, data_format)", "return float(2.0) class ReshapeBatch(C.ops.functions.UserFunction): def __init__(self, input, shape, name='reshape_with_batch'): super(ReshapeBatch,", "= _normalize_axis(axis, x) output = C.ops.argmax(x, axis=axis[0]) return _reshape_dummy_dim(output, axis)", "method to let cntk know we want to evaluate them.from", "raise ValueError('CNTK Backend: non-square dilation_rate is ' 'not supported.') if", "[variables] grads = [] for v in variables: g =", "rnn_inputs)) else: rnn_constants.append(constant) else: rnn_constants = constants if mask is", "_get_dynamic_axis_num(c) == 1: new_c.append(C.sequence.broadcast_as(c, rnn_inputs)) else: new_c.append(c) rnn_constants.append(new_c) else: if", "padding, padding]) x = C.convolution(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False])", "C.trainer.Trainer( outputs[0], criterion, [learner]) self.trainer_output = tuple([f.output for f in", "outputs = [] time_axis = 1 - nones if nones", "for _ in cntk_axes] strides = [-1 for _ in", "= _padding(x, padding[2], 3) return x def one_hot(indices, num_classes): return", "if data_format == 'channels_first': output = repeat_elements(x, height_factor, axis=2) output", "slice_col = slice(j * stride_col, j * stride_col + kernel_size[1])", "else 1) if len(y_shape) == 2: result = squeeze(result, -1)", "the assign ops won't be executed under this mode, that's", "axes]): raise ValueError('Multiple target dimensions are not supported. ' +", "= C.ops.slice(inputs, time_axis, i, i + 1) # remove dummy", "else: dtype = _convert_string_dtype(dtype) if name is None: name =", "training=training) def _convert_string_dtype(dtype): # cntk only support float32 and float64", "argument %s is not found in inputs. ' 'Please double", "greater_equal(x, y): return C.greater_equal(x, y) def less(x, y): return C.less(x,", "C.combine([u.output for u in u_ops]) grads = update_func.find_all_with_name('keras_grad_placeholder') u_list =", "len(shape): raise NotImplementedError i = num_dynamic_axis for i, rep in", "# cntk only could handle loss and 1 metric in", "is_placeholder(x): \"\"\"Returns whether `x` is a placeholder. 
# Arguments x:", "sqrt(x): return C.sqrt(x) def exp(x): return C.exp(x) def log(x): return", "1, it is not needed # in cntk, need to", "0: assert len(outputs) > 0 self.loss = outputs[0] # need", "if getattr(new_output, '_uses_learning_phase', False): global uses_learning_phase uses_learning_phase = True if", "= repeat_elements(output, width_factor, axis=2) return output else: raise ValueError('CNTK Backend:", "shape = (1, bias.shape[0]) else: shape = bias.shape else: shape", "isinstance(axis, list): _axis = list(axis) else: _axis = axis if", "raise ValueError('CNTK Backend: padding input tensor with ' 'shape `%s`", "= uses_learning_phase return last_output, final_output, states def rnn(step_function, inputs, initial_states,", "int_shape(then_expression) ndim_diff = ndim_expr - ndim_cond for i in range(ndim_diff):", "return sum(x * transpose(y), axis=axes[0], keepdims=True) else: if len(y_shape) ==", "kernel, output_shape, strides=(1, 1), padding='valid', data_format=None): if data_format is None:", "if name is None: name = '%s_alias' % x.name return", "0, 1)) return x def _preprocess_conv2d_kernel(kernel, data_format): # As of", "= [None for a in x.dynamic_axes] shape = tuple(dynamic_shape) +", "1)), data_format=None): assert len(padding) == 2 assert len(padding[0]) == 2", "repeat_elements(output, width_factor, axis=4) return output elif data_format == 'channels_last': output", "C.reshape(x, new_shape) temp = [x] * n return C.splice(*temp, axis=index)", "1, 1)): if data_format is None: data_format = image_data_format() if", "/ num_static_element) result = arguments.data().as_shape((num_batch,) + self.target_shape) return None, C.cntk_py.Value(result)", "support ' 'collapse of batch axis with inferred dimension. '", "None @staticmethod def _is_input_shape_compatible(input, placeholder): if hasattr(input, 'shape') and hasattr(placeholder,", "= C.reduce_mean(beta, axis - 1) else: target_shape.append(x_shape[axis]) broadcast_mean = C.reshape(mean,", "'instead of `None`.') return random_uniform_variable(shape, minval, maxval, dtype, seed) def", "= _preprocess_conv2d_input(x, data_format) depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format) depthwise_kernel = C.reshape(C.transpose(depthwise_kernel,", "_axis = [axis] elif isinstance(axis, list): _axis = list(axis) else:", "enumerate(n): if i >= num_dynamic_axis and shape[i] is not None:", "C.sin(x) def cos(x): return C.cos(x) def normalize_batch_in_training(x, gamma, beta, reduction_axes,", "padding[0], 1) x = _padding(x, padding[1], 2) x = _padding(x,", "argmax( target, axis=-1)), axis=C.Axis.all_axes()) def argmax(x, axis=-1): axis = [axis]", "x_aggregate * weight # Shape: (batch, filters, output_length) output =", "= x if training == 1 or training is True", "dtype=None, shape=None, name=None): if dtype is None: dtype = floatx()", "dtype) def variable(value, dtype=None, name=None, constraint=None): \"\"\"Instantiates a variable and", "rank of then and' ' else expressions. 
ndim(condition)=' + str(ndim_cond)", "dynamic_axis_num=1): if dtype is None: dtype = floatx() if not", "_postprocess_conv2d_output(x, data_format) def identity(x, name=None): if name is None: name", "backward(self, state, root_gradients): return C.cntk_py.Value(root_gradients.data()) class LambdaFunc(C.ops.functions.UserFunction): def __init__(self, arg,", "= _normalize_axis(axes, x) begin_index = [0 for _ in cntk_axes]", "t[1] if isinstance(value, np.ndarray) is False: value = np.asarray(value) if", "i)]) + shape new_shape = list(shape) new_shape = new_shape[num_dynamic_axis:] new_shape", "' '`Parameter`.' % type(x)) def placeholder( shape=None, ndim=None, dtype=None, sparse=False,", "v = value.asarray() updated.append(v) else: v = output_values.asarray() for o", "int_shape(inputs) dims = len(shape) uses_learning_phase = False if dims <", "padding, padding], groups=x.shape[0]) return _postprocess_conv2d_output(x, data_format) def conv3d(x, kernel, strides=(1,", "if _ is None else _ for _ in new_shape])", "There is a bug in cntk gather op which may", "assert len(outputs) > 0 self.loss = outputs[0] # need group", "dimension ' 'instead of `None`.') size *= _ binomial =", "batch axis. name: name of this node. \"\"\" def __init__(self,", "padding='valid', data_format=None, dilation_rate=1): raise NotImplementedError def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1,", "work. # need to be fixed in GA. if n", "\"\"\"Converts input first axis to CNTK static axis. We may", "ValueError('Invalid pooling mode: ' + str(pool_mode)) return _postprocess_conv2d_output(x, data_format) def", "**kwargs) def temporal_padding(x, padding=(1, 1)): assert len(padding) == 2 num_dynamic_axis", "else: current_layout = tuple([i for i in range(dims)]) if num_dynamic_axis", "x def _preprocess_conv2d_kernel(kernel, data_format): # As of Keras 2.0.0, all", "auto_padding=[padding]) else: raise ValueError('Invalid pooling mode: ' + str(pool_mode)) return", "_recurrence(rnn_inputs, states, mask) last_output = C.sequence.last(final_output) last_states = [C.sequence.last(s) for", "C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension cntk_shape = [dynamic_dimension", "= tuple([i for i in range(dims)]) if num_dynamic_axis > 0", "C.set_global_option('align_axis', 1) b_any = any dev = C.device.use_default_device() if dev.type()", "# need broadcasting target_shape = [] x_shape = int_shape(x) #", "' '`train_function`.' % argument.name) result = self.trainer.train_minibatch( input_dict, self.trainer_output) assert(len(result)", "= v.shape v._uses_learning_phase = False v.constraint = constraint return v", "with dynamic ' 'shape is not supported. 
Please provide '", "len(base_shape) == 2 if hasattr(C, 'pad'): x = C.pad(x, pattern=[padding,", "first weight = permute_dimensions(kernel, (2, 0, 1)) # shape: batch,", "C.pad(x, pattern=[list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]]) else: x = _padding(x,", "= x.shape if num_dynamic_axis > 0: assert len(base_shape) == 2", "data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise", "= C.clip(output, epsilon(), 1.0 - epsilon()) output = -target *", "beta is None: beta = zeros_like(mean) elif ndim(beta) == ndim(x)", "C.FreeDimension else _ for _ in shape]) if isinstance(x, C.variables.Parameter):", "if _get_dynamic_axis_num(s) == 0: if hasattr(C, 'to_batch'): initial.append(C.to_batch(s)) else: initial.append(C.user_function(ConvertToBatch(s)))", "else: return C.element_select(C.greater(x, 0), res, alpha * res) def in_top_k(predictions,", "i + 1) i += 1 i = normalized_axis[1] while", "% dtype) def variable(value, dtype=None, name=None, constraint=None): \"\"\"Instantiates a variable", "in states] past_values = [] for s, p in zip(states,", "return [ C.output_variable( self.inputs[0].shape[1:], self.inputs[0].dtype, [batch_axis])] def forward(self, arguments, device=None,", "isinstance(_axis, list): for i, a in enumerate(_axis): if a is", "None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}:", "is static axis, CNTK will do unroll by default if", "= _get_dynamic_axis_num(x) if isinstance(pattern, list): current_layout = [i for i", "= shape[nones:] new_shape = tuple([C.InferredDimension if _ == C.FreeDimension else", "0 and pattern[:num_dynamic_axis] != current_layout[:num_dynamic_axis]: raise ValueError('CNTK backend: the permute", "rank 3 to run RNN.' % dims) if _get_dynamic_axis_num(inputs) ==", "variance = C.minus(variance_mean, C.square(shifted_mean)) mean = C.plus(shifted_mean, shift) if not", "-1: # collapse axis with batch axis if b_any(_ ==", "if keepdims is False and isinstance(axis, list): # sequence axis", "mask is not None: mask_shape = int_shape(mask) if len(mask_shape) ==", "collapse axis with batch axis if b_any(_ == C.InferredDimension for", "C.splice(*tensors, axis=axis[0]) def flatten(x): return reshape(x, (-1,)) def reshape(x, shape):", "axis=axis) variance = C.minus(variance_mean, C.square(shifted_mean)) mean = C.plus(shifted_mean, shift) if", "if bias_dims == 1: shape = (1, bias.shape[0]) else: shape", "and len(x.dynamic_axes) > 1 def l2_normalize(x, axis=None): axis = [axis]", "= (batch_size,) + input.shape def infer_outputs(self): return [ C.output_variable( self.target_shape,", "shape, name='reshape_with_batch'): super(ReshapeBatch, self).__init__([input], as_numpy=False, name=name) self.from_shape = input.shape self.target_shape", "C.assign(x, result) def gradients(loss, variables): # cntk does not support", "def temporal_padding(x, padding=(1, 1)): assert len(padding) == 2 num_dynamic_axis =", "with gather op in next release if _get_cntk_version() >= 2.2:", "== ndim(x) and shape(var)[0] == 1: var = _reshape_dummy_dim(var, [0])", "if _axis[i] is not None: _axis[i] = cntk_axis[_axis[i]] else: if", "elif isinstance(axis, int): _axis = [axis] elif isinstance(axis, list): _axis", "C.reshape(x, shape) def mean(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x)", "broadcast_var = C.reshape(variant, target_shape) broadcast_gamma = C.reshape(gamma, target_shape) broadcast_beta =", "def batch_flatten(x): # cntk's batch axis is not in shape,", "strides = (1,) + 
strides # cntk output_shape does not", "padding = False else: raise ValueError('Invalid border mode: ' +", "= [] @contextmanager def name_scope(name): global NAME_SCOPE_STACK NAME_SCOPE_STACK.append(name) yield NAME_SCOPE_STACK.pop()", "return_states.append( C.ops.element_select( mask_slice, n_s, s)) new_states = return_states outputs.append(output) states", "C.variables.Constant) or isinstance( value, C.variables.Parameter): value = value.value # we", "isinstance(axis, int): _axis = [axis] elif isinstance(axis, list): _axis =", "return shape def ndim(x): shape = int_shape(x) return len(shape) def", "len(padding[0]) == 2 assert len(padding[1]) == 2 if data_format is", "preds so that the class probas of each sample sum", "C.times(one_hot_matrix, reference, output_rank=len(reference.shape) - 1) def _remove_dims(x, axis, keepdims=False): if", "= _get_dynamic_axis_num(x) if nones > ndim: raise ValueError('CNTK Backend: tensor", "log(sum(exp(x), axis=axis, keepdims=keepdims)) def var(x, axis=None, keepdims=False): m = mean(x,", "so using the workaround # here to mapping the correct", "len(shape) > 0 and shape[0] == -1: # collapse axis", "argument.name) # Some ops (like dropout) won't be applied during", "return x def _reshape_sequence(x, time_step): tmp_shape = list(int_shape(x)) tmp_shape[1] =", "C.reshape(target, output.shape) return categorical_crossentropy(target, output, from_logits) class Function(object): def __init__(self,", "axis.' % shape) if constants is None: constants = []", "epsilon) else: # need broadcasting target_shape = [] x_shape =", "last_output = outputs[0] while i < len(outputs): # add the", "!= np.float32 and value.dtype != np.float64): value = value.astype(np.float32) if", "return C.alias(x, name=name) def _preprocess_conv2d_input(x, data_format): if data_format == 'channels_last':", "_normalize_axis(axis, tensors[0]) return C.splice(*tensors, axis=axis[0]) def flatten(x): return reshape(x, (-1,))", "NAME_SCOPE_STACK = [] @contextmanager def name_scope(name): global NAME_SCOPE_STACK NAME_SCOPE_STACK.append(name) yield", "i, i + 1) # remove dummy dimension current =", "base_shape) if pattern[0] > 0: prefix_shape = list(base_shape) prefix_shape[axis] =", "(batch, output_length, filters) return permute_dimensions(output, (0, 2, 1)) def local_conv2d(inputs,", "(n,) elif isinstance(n, list): n = tuple(n) shape = int_shape(x)", "= 0 while i < shape[axis]: tmp = C.ops.slice(x, axis,", "return C.element_select(condition, then_expression, else_expression) def elu(x, alpha=1.): res = C.elu(x)", "perf issue, will resolve it later with cntk cond op.", "+ bias.shape[:3] elif data_format == 'channels_last': if bias_dims == 1:", "double ' 'check the model and inputs.' % argument.name) #", "None self.unrelated_updates = None self.updates = updates if len(updates) >", "outputs[-1] output = C.ops.element_select(mask_slice, output, prev_output) return_states = [] for", "those dummy axis. if ndim(mean) == ndim(x) and shape(mean)[0] ==", "= C.constant(np_value, dtype=dtype, name=_prepare_name(name, 'constant')) const._keras_shape = const.shape const._uses_learning_phase =", "x def softmax(x, axis=-1): return C.softmax(x, axis=axis) def softplus(x): return", "`go_backwards` is not supported with ' 'variable-length sequences. Please specify", "Unsupported dtype: %s. 
' 'CNTK only supports float32 and '", "for _ in new_shape]) return C.reshape(x, new_shape) def tile(x, n):", "= dilation_rate * (kernel.shape[0] - 1) x = temporal_padding(x, (left_pad,", "padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1): strides =", "= getattr(C, reduce_fun_name)(x, a) else: x = getattr(C, reduce_fun_name)(x, axis)", "else: shape = (bias.shape[2],) + bias.shape[:2] elif data_format == 'channels_last':", "ops (like dropout) won't be applied during \"eval\" in cntk.", "None: name = '' scale = (high - low) /", "input shape: (samples, conv_dim1, conv_dim2, conv_dim3, # input_depth) x =", "= np.random.binomial(1, p, size).astype(dtype).reshape(shape) return variable(value=binomial, dtype=dtype) def random_uniform(shape, minval=0.0,", "= new_states[:len(states)] i += 1 i = 1 # add", "list): axis = [axis] shape = list(int_shape(x)) _axis = []", "_axis def _reshape_dummy_dim(x, axis): shape = list(x.shape) _axis = [_", "x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1])]) else: x =", "permutation += [len(y_shape) - 1] y = C.transpose(y, perm=permutation) return", "# Padding the axis if len(n) < len(shape): n =", "= C.unpack_batch(x) const_a = C.reshape(const_a, shape) return C.to_batch(const_a) else: return", "nones = _get_dynamic_axis_num(x) if nones > ndim: raise ValueError('CNTK Backend:", "== 'float64': return np.float64 else: # cntk only running with", "if hasattr(C, 'pad'): x = C.pad(x, pattern=[padding, (0, 0)]) else:", "or isinstance(x, C.variables.Constant)): if isinstance(value, (float, int)): value = np.full(x.shape,", "_reduce_on_axis(x, axis, 'reduce_sum') return _remove_dims(output, axis, keepdims) def prod(x, axis=None,", "= tuple( [C.InferredDimension if _ is None else _ for", "for index in sorted(_axis, reverse=True): del shape[index] shape = [C.InferredDimension", "{'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) num_dynamic_axis =", "is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) return", "function to be applied to the variable after an optimizer", "output = C.ops.argmin(x, axis=axis[0]) return _reshape_dummy_dim(output, axis) def square(x): return", "the Numpy RNG seed = np.random.randint(10e7) if dtype is None:", "== 2: y = expand_dims(y) normalized_axis = [] normalized_axis.append(_normalize_axis(axes[0], x)[0])", "layer work. # need to be fixed in GA. if", "j * stride_col + kernel_size[1]) if data_format == 'channels_first': xs.append(reshape(inputs[:,", "= C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis) base_shape = x.shape if pattern[1]", "TH input shape: (samples, input_depth, rows, cols) # TF input", "is None: non_dyn_shape.append(x.shape[i]) else: non_dyn_shape.append(shape[i + num_dynamic]) return shape[:num_dynamic] +", "don't need case from bool / int return x def", "static learning phase flag, if it is not 0 or", "1.0 - epsilon()) output = -target * C.log(output) - (1.0", "'pad'): x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1])])", "= C.swapaxes(x, 0, 1) kernel = C.swapaxes(kernel, 0, 2) padding", "GPU to get better performance.') # A learning phase is", "= [len(x_shape) - 1, len(y_shape) - 2] if b_any([isinstance(a, (list,", "the gradient node ' 'is constructed.' 
% g) if len(u_list)", "place_holders): past_values.append(C.sequence.past_value(p, s)) new_output, new_states = step_function( x, tuple(past_values) +", "= x.shape if data_format == 'channels_first': if num_dynamic_axis > 0:", "isinstance(constant, list): new_c = [] for c in constant: if", "sequence axis is removed by default, so don't need reshape", "this is a workaround for recurrent layer # if n", "x.shape i = 0 while i < shape[axis]: tmp =", "1] y = C.transpose(y, perm=permutation) return C.times(x, y, len(y_shape) -", "prefix + '/' + default return prefix + '/' +", "assert len(padding[1]) == 2 if data_format is None: data_format =", "output, from_logits=False): target = C.one_hot(target, output.shape[-1]) target = C.reshape(target, output.shape)", "strides=(1, 1, 1), auto_padding=[False]) else: if dilation_rate[0] != dilation_rate[1]: raise", "in axis: if isinstance(a, C.Axis) is False: reduce_axes.append(a) return _reshape_dummy_dim(x,", "m): # create place holder place_holders = [C.placeholder(dynamic_axes=x.dynamic_axes) for _", "_ in cntk_axes] return C.slice(x, cntk_axes, begin_index, end_index, strides) def", "a static shape.' % (str(tensor.shape), str(value.shape))) feed_dict[tensor] = value updated", "== 0 and _get_dynamic_axis_num(x) == 0: return C.reduce_sum(all_matrix) else: return", "stddev, seed=seed), dtype=dtype) def dtype(x): return _convert_dtype_string(x.dtype) def zeros(shape, dtype=None,", "C.variables.Variable, C.variables.Parameter, C.ops.functions.Function)) def shape(x): shape = list(int_shape(x)) num_dynamic =", "+ 1) for _ in range(rep): slices.append(tmp) i += 1", "!= np.float64): value = value.astype(np.float32) if tensor == _LEARNING_PHASE_PLACEHOLDER: _LEARNING_PHASE_PLACEHOLDER.value", "that have a static shape.' % (str(tensor.shape), str(value.shape))) feed_dict[tensor] =", "class ReshapeBatch(C.ops.functions.UserFunction): def __init__(self, input, shape, name='reshape_with_batch'): super(ReshapeBatch, self).__init__([input], as_numpy=False,", "then_expression, else_expression): ndim_cond = ndim(condition) ndim_expr = ndim(then_expression) if ndim_cond", "output_shape kernel_shape = int_shape(kernel) _, feature_dim, filters = kernel_shape xs", "= return_states outputs.append(output) states = new_states i -= 1 else:", "weight # Shape: (batch, filters, output_length) output = sum(output, axis=3)", "_padding(x, padding, 0) else: assert len(base_shape) == 3 if hasattr(C,", "1} else _LEARNING_PHASE_PLACEHOLDER def set_learning_phase(value): global _LEARNING_PHASE if value not", "> ndim: raise ValueError('CNTK Backend: tensor with keras shape: `%s`", "is None else _ for _ in new_shape] return C.reshape(x,", "_get_dynamic_axis_num(x): if hasattr(x, 'dynamic_axes'): return len(x.dynamic_axes) else: return 0 def", "_convert_string_dtype(dtype) size = 1 for _ in shape: if _", "seed) def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): if dtype", "with keras model # we will return a constant as", "is_tensor(x): return isinstance(x, (C.variables.Constant, C.variables.Variable, C.variables.Parameter, C.ops.functions.Function)) def shape(x): shape", "_remove_dims(x, axis, keepdims=False): if keepdims is False and isinstance(axis, list):", "return C.reshape(result, ()) else: # scale preds so that the", "and float64 if dtype == 'float32': return np.float32 elif dtype", "prefix_shape = tuple(prefix_shape) x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis) base_shape", "C.reduce_mean(shift, axis=axis) shift = 
C.stop_gradient(shift) shifted_mean = C.minus(x, shift) for", "(-1, 1, feature_dim))) else: xs.append(reshape(inputs[:, slice_row, slice_col, :], (-1, 1,", "name = '' scale = (high - low) / 2", "+ str(data_format)) padding = _preprocess_border_mode(padding) strides = strides pool_size =", "C.variables.Parameter): x.value = value else: raise NotImplementedError def stop_gradient(variables): if", "= len(shape) nones = _get_dynamic_axis_num(x) if nones > ndim: raise", "normalized = batch_normalization( x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon) return", "if data_format == 'channels_last': shape = list(output_shape) shape[0] = output_shape[2]", "not detected. ' 'Will using CNTK 2.0 GA as default.')", "cntk tensor which has batch axis batch_size: size of batch", "return C.exp(x) def log(x): return C.log(x) def round(x): return C.round(x)", "`Constant` or ' '`Parameter`.' % type(x)) def placeholder( shape=None, ndim=None,", "2)) return x def _preprocess_conv3d_kernel(kernel, dim_ordering): kernel = C.transpose(kernel, (4,", "1) else: shape = (bias.shape[1],) + bias.shape[:1] elif data_format ==", "C.reshape(mean, target_shape) broadcast_var = C.reshape(variant, target_shape) broadcast_gamma = C.reshape(gamma, target_shape)", "'CNTK can not take variable length inputs. Please ' 'pass", "unroll by default if shape[1] is None: raise ValueError('CNTK Backend:", "(1,) + strides # cntk output_shape does not include batch", "= root_gradients.shape()[0] * np.prod(np.asarray(self.target_shape)) num_static_element = np.prod(np.asarray(self.from_shape)) num_old_batch = int(num_element", "> 1 def l2_normalize(x, axis=None): axis = [axis] axis =", "None: min_value = -np.inf return C.clip(x, min_value, max_value) def binary_crossentropy(target,", "keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_mean')", "def pow(x, a): return C.pow(x, a) def clip(x, min_value, max_value):", "is None: gamma = ones_like(var) elif ndim(gamma) == ndim(x) and", "name=''): self.when = when self.execute = execute super(LambdaFunc, self).__init__([arg], name=name)", "%s, ' 'must be in interval [0, 1].' % level)", "seed=seed), dtype=dtype) def dtype(x): return _convert_dtype_string(x.dtype) def zeros(shape, dtype=None, name=None):", "= [] num_time_step = shape[1] if num_time_step is None and", "global _LEARNING_PHASE_PLACEHOLDER _LEARNING_PHASE = -1 _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0) def in_train_phase(x,", "name=None): if dtype is None: dtype = floatx() ctype =", "= C.reduce_mean(gamma, axis - 1) beta = C.reduce_mean(beta, axis -", "(1, 2, 0)) return x def _preprocess_conv3d_input(x, data_format): if data_format", "list(axis) else: _axis = axis if isinstance(_axis, list): for i,", "x, strides, auto_padding=[ False, padding, padding, padding], output_shape=output_shape) return _postprocess_conv3d_output(x,", "check the model and inputs in ' '`train_function`.' % argument.name)", "want to evaluate them.from # But the assign ops won't", "the Numpy RNG seed = np.random.randint(10e3) if dtype is None:", "if constants is None: constants = [] num_time_step = shape[1]", "3 assert len(padding[0]) == 2 assert len(padding[1]) == 2 assert", "dim in base_shape]): raise ValueError('CNTK Backend: padding input tensor with", "> 0: assert len(base_shape) == 4 if hasattr(C, 'pad'): x", ":], (-1, 1, feature_dim))) x_aggregate = concatenate(xs, axis=1) # transpose", "dtype: Tensor type. 
name: Optional name string for the tensor.", "'dynamic shape is not supported now. ' 'Please provide fixed", "- target) * C.log(1.0 - output) return output def get_variable_shape(x):", "is None: gamma = ones_like(x) else: gamma = ones_like(beta) if", "conversion when cntk supports int32, int64 # https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter dtype =", "output_shape, strides=(1, 1), padding='valid', data_format=None): if data_format is None: data_format", "model, # we will create gradient as a constant placeholder,", "introduce this operation in CNTK native implementation later. # Arguments", "if self.unrelated_updates is not None: input_dict = {} for argument", "if tensor == _LEARNING_PHASE_PLACEHOLDER: _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(value) else: # in", "[] dynamic_axis_index = 0 for i in range(ndim): if shape[i]", "shape history.' % (str(shape), nones)) # Current cntk does not", "+ default return prefix + '/' + name def constant(value,", "or ' '`Parameter`.' % type(x)) def placeholder( shape=None, ndim=None, dtype=None,", "target_shape) normalized = batch_normalization( x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon)", "axis=axis, keepdims=keepdims)) def expand_dims(x, axis=-1): shape = list(int_shape(x)) nones =", "_contain_seqence_axis(x): if _get_dynamic_axis_num(x) > 1: return x.dynamic_axes[1] == C.Axis.default_dynamic_axis() else:", "place holder, the cntk learner will apply # the gradient", "res, alpha * res) def in_top_k(predictions, targets, k): _targets =", "+= list(range(len(y_shape) - 2)) permutation += [len(y_shape) - 1] y", "alt, training=None): global _LEARNING_PHASE if training is None: training =", ">= 2.2: return C.ops.gather(reference, indices) else: num_classes = reference.shape[0] one_hot_matrix", "'is not found in inputs. Please double ' 'check the", "assert len(padding) == 2 num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape", "variable and returns it. # Arguments value: Numpy array, initial", "2: if axes[0] == axes[1]: result = sum(x * y,", "new_shape] return C.reshape(x, new_shape) def permute_dimensions(x, pattern): dims = len(int_shape(x))", "(-1,)) x._keras_shape = (None, dim) return x def softmax(x, axis=-1):", "+ depthwise_kernel.shape[2:]) padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1):", "= C.transpose(x, (1, 2, 3, 0)) return x def _get_dynamic_axis_num(x):", "[C.InferredDimension if _ == C.FreeDimension else _ for _ in", "else: raise ValueError('Invalid border mode: ' + str(padding)) return padding", "while keras expect (batch, ) return C.reshape(result, ()) else: #", "shape.insert(index, 1) new_shape = shape[nones:] new_shape = tuple( [C.InferredDimension if", "run Keras models in # either train mode (learning_phase ==", "= _get_dynamic_axis_num(x) # Padding the axis if len(n) < len(shape):", "only rank %d ' 'Need at least rank 3 to", "\"\"\"Instantiates a variable and returns it. # Arguments value: Numpy", "if not isinstance(axis, list): axis = [axis] shape = list(int_shape(x))", "outputs, updates=updates, **kwargs) def temporal_padding(x, padding=(1, 1)): assert len(padding) ==", "training is None: training = learning_phase() uses_learning_phase = True else:", "= repeat_elements(output, width_factor, axis=4) return output elif data_format == 'channels_last':", "ndim_cond for i in range(ndim_diff): condition = expand_dims(condition) condition =", "the model and inputs.' 
% argument.name) self.unrelated_updates.eval(input_dict, as_numpy=False) return updated", "name_scope(name): global NAME_SCOPE_STACK NAME_SCOPE_STACK.append(name) yield NAME_SCOPE_STACK.pop() def get_uid(prefix=''): _UID_PREFIXES[prefix] +=", "return x def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None): assert", "output_shape=output_shape) return _postprocess_conv3d_output(x, data_format) def pool2d(x, pool_size, strides=(1, 1), padding='valid',", "to run RNN.' % dims) if _get_dynamic_axis_num(inputs) == 0 or", "list(padding[1]), list(padding[2]), [0, 0]]) else: x = _padding(x, padding[0], 1)", "raise ValueError('Unknown data_format ' + str(data_format)) stride_row, stride_col = strides", "_ + len(shape)) if len(_axis) == 0: return x nones", "const_a = C.unpack_batch(x) const_a = C.reshape(const_a, shape) return C.to_batch(const_a) else:", ">= 0 else _ + len(shape)) if len(_axis) == 0:", "= False # CNTK currently don't support cond op, so", "' 'CNTK\\'s CPU version is not fully optimized,' 'please run", "trainer, ' 'found gradient node `%s` which is not '", "prev_output) return_states = [] for s, n_s in zip(states, new_states):", "`count_params` with dynamic ' 'shape is not supported. Please provide", "C.minus(x, shift) for axis in _axes: shifted_mean = C.reduce_mean(shifted_mean, axis=axis)", "'CNTK\\'s CPU version is not fully optimized,' 'please run with", "To make it work, call # \"forward\" method to let", "list(padding[2])]) else: x = _padding(x, padding[0], 2) x = _padding(x,", "dtype == 'float32': return np.float32 elif dtype == 'float64': return", "ndim(x) and shape(mean)[0] == 1: mean = _reshape_dummy_dim(mean, [0]) if", "dtype=None, name=None): return zeros_like(x) + 1 def count_params(x): for _", "elif dims == 3: if data_format == 'channels_first': if bias_dims", "padding], output_shape=output_shape) return _postprocess_conv3d_output(x, data_format) def pool2d(x, pool_size, strides=(1, 1),", "update_func.find_all_with_name('keras_grad_placeholder') u_list = [] p_list = [] for g in", "_get_dynamic_axis_num(x) non_dyn_shape = [] for i in range(len(x.shape)): if shape[i", "_padding(x, padding[1], 1) else: assert len(base_shape) == 4 if hasattr(C,", "= [x] * rep x = C.splice(*tmp, axis=i - num_dynamic_axis)", "make the recurrent layer work. 
# Recovered from this span's fragments, which derive from Keras's CNTK
# backend (keras/backend/cntk_backend.py). Only the units that could be
# reassembled coherently are kept in order below; a note at the end lists
# what the remaining, unordered fragments covered.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import cntk as C
import numpy as np
from .common import floatx, epsilon, image_dim_ordering, image_data_format
from collections import defaultdict
from contextlib import contextmanager
import warnings

b_any = any

dev = C.device.use_default_device()
if dev.type() == 0:
    warnings.warn(
        'CNTK backend warning: GPU is not detected. '
        'CNTK\'s CPU version is not fully optimized,'
        'please run with GPU to get better performance.')

# A learning phase is a bool tensor used to run keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_LEARNING_PHASE_PLACEHOLDER = C.constant(
    shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase')
# static learning phase flag, if it is not 0 or 1, we will go with
# dynamic learning phase tensor.
_LEARNING_PHASE = -1

_UID_PREFIXES = defaultdict(int)

# cntk does not support gradients as symbolic op; to hook up with keras model
# we will return a constant as place holder, the cntk learner will apply
# the gradient during training. This global map keeps the mapping from
# grad placeholder to parameter.
grad_parameter_dict = {}

NAME_SCOPE_STACK = []


@contextmanager
def name_scope(name):
    global NAME_SCOPE_STACK
    NAME_SCOPE_STACK.append(name)
    yield
    NAME_SCOPE_STACK.pop()


def get_uid(prefix=''):
    _UID_PREFIXES[prefix] += 1
    return _UID_PREFIXES[prefix]


def learning_phase():
    # If _LEARNING_PHASE is not 0 or 1, return dynamic learning phase tensor
    return _LEARNING_PHASE if _LEARNING_PHASE in {0, 1} else _LEARNING_PHASE_PLACEHOLDER


def set_learning_phase(value):
    global _LEARNING_PHASE
    if value not in {0, 1}:
        raise ValueError('CNTK Backend: Set learning phase '
                         'with value %s is not supported, '
                         'expected 0 or 1.' % value)
    _LEARNING_PHASE = value


def clear_session():
    """Reset learning phase flag for cntk backend.
    """
    global _LEARNING_PHASE
    global _LEARNING_PHASE_PLACEHOLDER
    _LEARNING_PHASE = -1
    _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0)


def in_train_phase(x, alt, training=None):
    global _LEARNING_PHASE
    if training is None:
        training = learning_phase()
        uses_learning_phase = True
    else:
        uses_learning_phase = False

    # CNTK currently doesn't support a cond op, so here we use the
    # element_select approach as workaround. It may have a perf issue.
    if callable(x) and isinstance(x, C.cntk_py.Function) is False:
        x = x()
    if callable(alt) and isinstance(alt, C.cntk_py.Function) is False:
        alt = alt()

    if training is True:
        x._uses_learning_phase = uses_learning_phase
        return x
    else:
        # if _LEARNING_PHASE is static
        if isinstance(training, int) or isinstance(training, bool):
            result = x if training else alt
        else:
            result = C.element_select(training, x, alt)
        result._uses_learning_phase = uses_learning_phase
        return result


def in_test_phase(x, alt, training=None):
    return in_train_phase(alt, x, training=training)


def _convert_string_dtype(dtype):
    # cntk only support float32 and float64
    if dtype == 'float32':
        return np.float32
    elif dtype == 'float64':
        return np.float64
    else:
        # cntk only runs with float, so
        # try to cast to float to run the model
        return np.float32


def _convert_dtype_string(dtype):
    if dtype == np.float32:
        return 'float32'
    elif dtype == np.float64:
        return 'float64'
    else:
        raise ValueError('CNTK Backend: Unsupported dtype: %s. '
                         'CNTK only supports float32 and '
                         'float64.' % dtype)


def _prepare_name(name, default):
    prefix = '_'.join(NAME_SCOPE_STACK)
    if name is None or name == '':
        return prefix + '/' + default
    return prefix + '/' + name


def variable(value, dtype=None, name=None, constraint=None):
    """Instantiates a variable and returns it.

    # Arguments
        value: Numpy array, initial value of the tensor.
        dtype: Tensor type.
        name: Optional name string for the tensor.
        constraint: Optional projection function to be
            applied to the variable after an optimizer update.

    # Returns
        A variable instance (with Keras metadata included).
    """
    if dtype is None:
        dtype = floatx()

    if name is None:
        name = ''

    if isinstance(value, C.variables.Constant) \
            or isinstance(value, C.variables.Parameter):
        value = value.value

    # we don't support init parameter with symbolic op, so eval it first as
    # workaround
    if isinstance(value, C.cntk_py.Function):
        value = eval(value)

    shape = value.shape if hasattr(value, 'shape') else ()
    if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) > 0:
        value = value.astype(dtype)

    # TODO: remove the conversion when cntk supports int32, int64
    # https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter
    dtype = 'float32' if 'int' in str(dtype) else dtype

    v = C.parameter(shape=shape,
                    init=value,
                    dtype=dtype,
                    name=_prepare_name(name, 'variable'))
    v._keras_shape = v.shape
    v._uses_learning_phase = False
    v.constraint = constraint
    return v

# Fragments from the remainder of the same backend also appear in this span
# but could not be reassembled in order; they cover (among others):
# placeholder, is_keras_tensor, int_shape/ndim, dot/batch_dot, gather,
# one_hot, the reductions (sum/mean/max/min/prod/var/std/any/all/logsumexp),
# random_{uniform,normal,binomial}, dropout, relu/elu/softmax/sigmoid,
# binary_/categorical_crossentropy, conv1d/2d/3d (plus transpose, separable
# and depthwise variants), pool2d/pool3d, spatial_2d/3d_padding,
# resize_images/resize_volumes, local_conv1d/local_conv2d,
# batch_normalization/_moments, rnn/_static_rnn, _get_cntk_version, the
# Function class, and the user functions ReshapeBatch, ConvertToBatch,
# ConvertToStatic and LambdaFunc.
new_output, n_s final_output, final_states", "'pad'): x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1]),", "not supported. Please provide ' 'fixed dimension instead of `None`.')", "(0, 0)]) else: x = _padding(x, padding, 0) else: assert", "root_gradients): grad_array_view = root_gradients.data() num_element = root_gradients.shape()[0] * np.prod(np.asarray(self.target_shape)) num_static_element", "def _is_input_shape_compatible(input, placeholder): if hasattr(input, 'shape') and hasattr(placeholder, 'shape'): num_dynamic", "to be fixed in GA. if n is C.InferredDimension or", "' + str(data_format)) dims = len(x.shape) if dims > 0", "name is None or name == '': return prefix +", "padding = _preprocess_border_mode(padding) strides = [strides] x = C.convolution( kernel,", "feed_dict[argument] else: raise ValueError( 'CNTK backend: assign ops argument %s", "value): if (isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)): if isinstance(value, (float," ]
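The list above collects word n-grams from a Keras-style backend written against the CNTK Python API. A few of its element-wise helpers appear verbatim in the fragments (sign expressed as x / |x|, pow, and a transpose built on swapaxes); the sketch below restates just those three for orientation, assuming the CNTK 2.x package is importable as cntk. It is a hedged reconstruction, not the backend itself.

import cntk as C

def sign(x):
    # As in the fragments: sign(x) written as x / |x| (undefined at x == 0).
    return x / C.abs(x)

def pow(x, a):
    # Element-wise power, delegated directly to cntk.pow.
    return C.pow(x, a)

def transpose(x):
    # The fragments swap the first two static axes rather than reversing all axes.
    return C.swapaxes(x, 0, 1)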
[ "The server SHOULD return a 401 (Unauthorized) status code when", "request # The server SHOULD return a 401 (Unauthorized) status", "body as a string. :param headers: The request headers as", "errors class ResourceEndpoint(BaseEndpoint): \"\"\"An endpoint responsible for protecting resources. Typical", "HTTP verb, i.e. GET, POST, PUT, HEAD, etc. :param body:", "is only used in authorization headers and how # it", "= self.request_validator.dummy_access_token # Note that `realm`_ is only used in", "protected under. This will be supplied to the ``validate_realms`` method", "would enable client realm access enumeration. # # The require_realm", "failed.\") log.info(\"Valid client: %s\", valid_client) log.info(\"Valid token: %s\", valid_resource_owner) log.info(\"Valid", "and not provided by # the client. valid_realm = self.request_validator.validate_realms(request.client_key,", "client. valid_realm = self.request_validator.validate_realms(request.client_key, request.resource_owner_key, request, uri=request.uri, realms=realms) valid_signature =", "``validate_realms`` method of the request validator. :returns: A tuple of", "\"\"\" oauthlib.oauth1.rfc5849.endpoints.resource ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module is an implementation of the", "# -*- coding: utf-8 -*- \"\"\" oauthlib.oauth1.rfc5849.endpoints.resource ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module", "where a client requests access to a specific realm. #", "unicode_literals from oauthlib.common import log from .base import BaseEndpoint from", "An oauthlib.common.Request object. \"\"\" try: request = self._create_request(uri, http_method, body,", "request if not self.request_validator.check_access_token( request.resource_owner_key): return False, request if not", "that the realm is now tied to the access token", "self.request_validator.dummy_access_token # Note that `realm`_ is only used in authorization", "the access token and not provided by # the client.", "workflow where a client requests access to a specific realm.", "enumeration valid_resource_owner = self.request_validator.validate_access_token( request.client_key, request.resource_owner_key, request) if not valid_resource_owner:", "now tied to the access token and not provided by", "OAuth 1.0 RFC 5849. \"\"\" from __future__ import absolute_import, unicode_literals", "invoke and return the response of the view. If invalid", "See :doc:`/oauth1/validator` for details on which validator methods to implement", "with a request validator and invoke the ``validate_protected_resource_request`` in a", "credentials. # Note: This is postponed in order to avoid", "early exit would enable client enumeration valid_client = self.request_validator.validate_client_key( request.client_key,", "return f(*args, **kwargs) else: return abort(403) \"\"\" def validate_protected_resource_request(self, uri,", "list of realms the resource is protected under. This will", "provider.validate_protected_resource_request( request.url, http_method=request.method, body=request.data, headers=request.headers, realms=realms or []) if v:", "how # it should be interepreted is not included in", "validator. :returns: A tuple of 2 elements. 1. True if", "realm to which the # client has access and as", "as such every client should be checked # to ensure", "\"\"\" try: request = self._create_request(uri, http_method, body, headers) except errors.OAuth1Error:", "the resource is protected under. 
This will be supplied to", "self.request_validator.validate_timestamp_and_nonce( request.client_key, request.timestamp, request.nonce, request, access_token=request.resource_owner_key): return False, request #", "full URI of the token request. :param http_method: A valid", "# time request verification. # # Note that early exit", "__future__ import absolute_import, unicode_literals from oauthlib.common import log from .base", "with invalid or expired token. # Note: This is postponed", "# # Note that early exit would enable client enumeration", "resource owner enumeration valid_resource_owner = self.request_validator.validate_access_token( request.client_key, request.resource_owner_key, request) if", "token and not provided by # the client. valid_realm =", "your_validator from oauthlib.oauth1 import ResourceEndpoint endpoint = ResourceEndpoint(your_validator) def require_oauth(realms=None):", "# Note: This is postponed in order to avoid timing", "tied to the access token and not provided by #", "of the token request. :param http_method: A valid HTTP verb,", "if not self.request_validator.validate_timestamp_and_nonce( request.client_key, request.timestamp, request.nonce, request, access_token=request.resource_owner_key): return False,", "request.client_key = self.request_validator.dummy_client # The server SHOULD return a 401", "# # Clients obtaining an access token will not supply", "and fetching secrets/keys to ensure the flow of every #", "under. This will be supplied to the ``validate_realms`` method of", "self.request_validator.validate_client_key( request.client_key, request) if not valid_client: request.client_key = self.request_validator.dummy_client #", "users from guessing sensitive information v = all((valid_client, valid_resource_owner, valid_realm,", "request is valid, invoke and return the response of the", "the client. valid_realm = self.request_validator.validate_realms(request.client_key, request.resource_owner_key, request, uri=request.uri, realms=realms) valid_signature", "if v: return f(*args, **kwargs) else: return abort(403) \"\"\" def", "by checking the require_resource_owner # flag and abscence of realm.", "or expired token. # Note: This is postponed in order", "provider logic of OAuth 1.0 RFC 5849. \"\"\" from __future__", "import BaseEndpoint from .. import errors class ResourceEndpoint(BaseEndpoint): \"\"\"An endpoint", "decorator. See :doc:`/oauth1/validator` for details on which validator methods to", "execution and # prevents malicious users from guessing sensitive information", "request body as a string. :param headers: The request headers", "checked. Instead the previously requested realm should be # transferred", "\"\"\"Create a request token response, with a new request token", "etc. :param body: The request body as a string. :param", "return a 401 (Unauthorized) status code when # receiving a", "is assigned and used to maintain near constant # time", "previously requested realm should be # transferred from the request", "near constant # time request verification. # # Note that", "absolute_import, unicode_literals from oauthlib.common import log from .base import BaseEndpoint", "1. True if valid, False otherwise. 2. An oauthlib.common.Request object.", "flow of every # request remains almost identical regardless of", "avoid timing attacks, instead # a dummy client is assigned", "will always validate the realm but note # that the", "= self._check_signature(request) # We delay checking validity until the very", ":param http_method: A valid HTTP verb, i.e. 
GET, POST, PUT,", "exit would enable resource owner enumeration valid_resource_owner = self.request_validator.validate_access_token( request.client_key,", "seen as a scope or realm to which the #", "return False, request if not self.request_validator.check_access_token( request.resource_owner_key): return False, request", "be # transferred from the request token to the access", "body: The request body as a string. :param headers: The", "return the response of the view. If invalid create and", "def decorator(f): @wraps(f) def wrapper(request, *args, **kwargs): v, r =", ".. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2 # # Note that early exit would", "from your_validator import your_validator from oauthlib.oauth1 import ResourceEndpoint endpoint =", "2. An oauthlib.common.Request object. \"\"\" try: request = self._create_request(uri, http_method,", "not self.request_validator.check_access_token( request.resource_owner_key): return False, request if not self.request_validator.validate_timestamp_and_nonce( request.client_key,", "values for # calculations and fetching secrets/keys to ensure the", "**kwargs): v, r = provider.validate_protected_resource_request( request.url, http_method=request.method, body=request.data, headers=request.headers, realms=realms", "not v: log.info(\"[Failure] request verification failed.\") log.info(\"Valid client: %s\", valid_client)", "False, request if not self.request_validator.check_access_token( request.resource_owner_key): return False, request if", "request token) need not require a realm # and can", "coding: utf-8 -*- \"\"\" oauthlib.oauth1.rfc5849.endpoints.resource ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module is an", "to avoid timing attacks, instead # a dummy client is", ":param realms: A list of realms the resource is protected", "only used in authorization headers and how # it should", "error response directly from the decorator. See :doc:`/oauth1/validator` for details", "# This first step (obtaining request token) need not require", "client realm access enumeration. # # The require_realm indicates this", "# # The require_realm indicates this is the first step", "realm. # This first step (obtaining request token) need not", "and invoke the ``validate_protected_resource_request`` in a decorator around a view", "require a realm # and can then be identified by", "[]) if v: return f(*args, **kwargs) else: return abort(403) \"\"\"", "from oauthlib.oauth1 import ResourceEndpoint endpoint = ResourceEndpoint(your_validator) def require_oauth(realms=None): def", "a request validator and invoke the ``validate_protected_resource_request`` in a decorator", "of realm. # # Clients obtaining an access token will", "token) need not require a realm # and can then", "and how # it should be interepreted is not included", "dummy client is assigned and used to maintain near constant", "# The require_realm indicates this is the first step in", "using dummy values for # calculations and fetching secrets/keys to", "request.client_key, request.resource_owner_key, request) if not valid_resource_owner: request.resource_owner_key = self.request_validator.dummy_access_token #", "end, using dummy values for # calculations and fetching secrets/keys", "dummy token is assigned and used to maintain near constant", "1.0 RFC 5849. \"\"\" from __future__ import absolute_import, unicode_literals from", "a request token response, with a new request token if", "endpoint responsible for protecting resources. 
Typical use is to instantiate", "\"\"\" from __future__ import absolute_import, unicode_literals from oauthlib.common import log", "the view. If invalid create and return an error response", "whether valid values # have been supplied. This ensures near", "resources will always validate the realm but note # that", "return an error response directly from the decorator. See :doc:`/oauth1/validator`", "self.request_validator.check_access_token( request.resource_owner_key): return False, request if not self.request_validator.validate_timestamp_and_nonce( request.client_key, request.timestamp,", "# # Access to protected resources will always validate the", "spec. # However they could be seen as a scope", "verb, i.e. GET, POST, PUT, HEAD, etc. :param body: The", "self._create_request(uri, http_method, body, headers) except errors.OAuth1Error: return False, None try:", "status code when # receiving a request with invalid client", "regardless of whether valid values # have been supplied. This", "response, with a new request token if valid. :param uri:", "response directly from the decorator. See :doc:`/oauth1/validator` for details on", "which validator methods to implement for this endpoint. An example", "instead # a dummy client is assigned and used to", "ensure the flow of every # request remains almost identical", "the realm is now tied to the access token and", "# # Note that early exit would enable client realm", "request) if not valid_client: request.client_key = self.request_validator.dummy_client # The server", "ensure it is authorized access to that scope or realm.", "it is authorized access to that scope or realm. #", "from the request token to the access token. # #", "request token to the access token. # # Access to", "this endpoint. An example decorator:: from functools import wraps from", "responsible for protecting resources. Typical use is to instantiate with", "and used to maintain near constant # time request verification.", "# flag and abscence of realm. # # Clients obtaining", "*args, **kwargs): v, r = provider.validate_protected_resource_request( request.url, http_method=request.method, body=request.data, headers=request.headers,", "Note that early exit would enable client enumeration valid_client =", "will not supply a realm and it will # not", "to instantiate with a request validator and invoke the ``validate_protected_resource_request``", "a dict. :param realms: A list of realms the resource", "valid_signature)) if not v: log.info(\"[Failure] request verification failed.\") log.info(\"Valid client:", "interepreted is not included in the OAuth spec. # However", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module is an implementation of the resource protection", "try: self._check_transport_security(request) self._check_mandatory_parameters(request) except errors.OAuth1Error: return False, request if not", "around a view function. If the request is valid, invoke", "# Clients obtaining an access token will not supply a", "# receiving a request with invalid or expired token. #", "This ensures near constant time execution and # prevents malicious", "def validate_protected_resource_request(self, uri, http_method='GET', body=None, headers=None, realms=None): \"\"\"Create a request", "owner enumeration valid_resource_owner = self.request_validator.validate_access_token( request.client_key, request.resource_owner_key, request) if not", "token. # # Access to protected resources will always validate", "provided by # the client. 
valid_realm = self.request_validator.validate_realms(request.client_key, request.resource_owner_key, request,", "request with invalid client credentials. # Note: This is postponed", "import errors class ResourceEndpoint(BaseEndpoint): \"\"\"An endpoint responsible for protecting resources.", "valid. :param uri: The full URI of the token request.", "a decorator around a view function. If the request is", "the request token to the access token. # # Access", "is postponed in order to avoid timing attacks, instead #", "be identified by checking the require_resource_owner # flag and abscence", "%s\", valid_client) log.info(\"Valid token: %s\", valid_resource_owner) log.info(\"Valid realm: %s\", valid_realm)", "client has access and as such every client should be", "http://tools.ietf.org/html/rfc2617#section-1.2 # # Note that early exit would enable client", "self._check_transport_security(request) self._check_mandatory_parameters(request) except errors.OAuth1Error: return False, request if not request.resource_owner_key:", "an implementation of the resource protection provider logic of OAuth", "view. If invalid create and return an error response directly", "request.url, http_method=request.method, body=request.data, headers=request.headers, realms=realms or []) if v: return", "valid_resource_owner: request.resource_owner_key = self.request_validator.dummy_access_token # Note that `realm`_ is only", "access_token=request.resource_owner_key): return False, request # The server SHOULD return a", "# not be checked. Instead the previously requested realm should", "step (obtaining request token) need not require a realm #", "require_resource_owner # flag and abscence of realm. # # Clients", "# Note that early exit would enable client realm access", "An example decorator:: from functools import wraps from your_validator import", "request if not self.request_validator.validate_timestamp_and_nonce( request.client_key, request.timestamp, request.nonce, request, access_token=request.resource_owner_key): return", "if valid, False otherwise. 2. An oauthlib.common.Request object. \"\"\" try:", "step in the OAuth # workflow where a client requests", "or realm. # .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2 # # Note that", "should be interepreted is not included in the OAuth spec.", "return False, None try: self._check_transport_security(request) self._check_mandatory_parameters(request) except errors.OAuth1Error: return False,", "maintain near constant # time request verification. # # Note", "ResourceEndpoint(BaseEndpoint): \"\"\"An endpoint responsible for protecting resources. Typical use is", "Access to protected resources will always validate the realm but", "checking validity until the very end, using dummy values for", "request.resource_owner_key): return False, request if not self.request_validator.validate_timestamp_and_nonce( request.client_key, request.timestamp, request.nonce,", "access token will not supply a realm and it will", "valid_resource_owner, valid_realm, valid_signature)) if not v: log.info(\"[Failure] request verification failed.\")", "This is postponed in order to avoid timing attacks, instead", "# Note that `realm`_ is only used in authorization headers", "not be checked. 
Instead the previously requested realm should be", "to avoid timing attacks, instead # a dummy token is", ":doc:`/oauth1/validator` for details on which validator methods to implement for", "realms=None): \"\"\"Create a request token response, with a new request", "This first step (obtaining request token) need not require a", "abort(403) \"\"\" def validate_protected_resource_request(self, uri, http_method='GET', body=None, headers=None, realms=None): \"\"\"Create", "token. # Note: This is postponed in order to avoid", "that `realm`_ is only used in authorization headers and how", "headers=request.headers, realms=realms or []) if v: return f(*args, **kwargs) else:", "return False, request if not request.resource_owner_key: return False, request if", "not self.request_validator.validate_timestamp_and_nonce( request.client_key, request.timestamp, request.nonce, request, access_token=request.resource_owner_key): return False, request", "-*- coding: utf-8 -*- \"\"\" oauthlib.oauth1.rfc5849.endpoints.resource ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module is", "function. If the request is valid, invoke and return the", "# request remains almost identical regardless of whether valid values", "to protected resources will always validate the realm but note", "for # calculations and fetching secrets/keys to ensure the flow", "The request body as a string. :param headers: The request", "# calculations and fetching secrets/keys to ensure the flow of", "# prevents malicious users from guessing sensitive information v =", "wraps from your_validator import your_validator from oauthlib.oauth1 import ResourceEndpoint endpoint", "instead # a dummy token is assigned and used to", "a client requests access to a specific realm. # This", "decorator around a view function. If the request is valid,", "exit would enable client enumeration valid_client = self.request_validator.validate_client_key( request.client_key, request)", "has access and as such every client should be checked", "supplied to the ``validate_realms`` method of the request validator. :returns:", "invalid create and return an error response directly from the", "it will # not be checked. Instead the previously requested", "an access token will not supply a realm and it", "realm should be # transferred from the request token to", "token to the access token. # # Access to protected", "supply a realm and it will # not be checked.", "HEAD, etc. :param body: The request body as a string.", "request validator and invoke the ``validate_protected_resource_request`` in a decorator around", "request.client_key, request) if not valid_client: request.client_key = self.request_validator.dummy_client # The", "resource protection provider logic of OAuth 1.0 RFC 5849. \"\"\"", "and it will # not be checked. Instead the previously", "realm is now tied to the access token and not", "delay checking validity until the very end, using dummy values", "realms the resource is protected under. This will be supplied", "= self.request_validator.dummy_client # The server SHOULD return a 401 (Unauthorized)", "have been supplied. This ensures near constant time execution and", "request, uri=request.uri, realms=realms) valid_signature = self._check_signature(request) # We delay checking", "v: log.info(\"[Failure] request verification failed.\") log.info(\"Valid client: %s\", valid_client) log.info(\"Valid", "will be supplied to the ``validate_realms`` method of the request", "# have been supplied. 
This ensures near constant time execution", "receiving a request with invalid client credentials. # Note: This", "a scope or realm to which the # client has", "implementation of the resource protection provider logic of OAuth 1.0", "is authorized access to that scope or realm. # ..", "code when # receiving a request with invalid client credentials.", "-*- \"\"\" oauthlib.oauth1.rfc5849.endpoints.resource ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module is an implementation of", "realm. # # Clients obtaining an access token will not", "self.request_validator.validate_realms(request.client_key, request.resource_owner_key, request, uri=request.uri, realms=realms) valid_signature = self._check_signature(request) # We", "they could be seen as a scope or realm to", "used to maintain near constant # time request verification. #", "= self.request_validator.validate_realms(request.client_key, request.resource_owner_key, request, uri=request.uri, realms=realms) valid_signature = self._check_signature(request) #", "authorization headers and how # it should be interepreted is", "the OAuth spec. # However they could be seen as", "is not included in the OAuth spec. # However they", "to which the # client has access and as such", ":returns: A tuple of 2 elements. 1. True if valid,", ":param headers: The request headers as a dict. :param realms:", "else: return abort(403) \"\"\" def validate_protected_resource_request(self, uri, http_method='GET', body=None, headers=None,", "a string. :param headers: The request headers as a dict.", "valid, False otherwise. 2. An oauthlib.common.Request object. \"\"\" try: request", "http_method, body, headers) except errors.OAuth1Error: return False, None try: self._check_transport_security(request)", "decorator(f): @wraps(f) def wrapper(request, *args, **kwargs): v, r = provider.validate_protected_resource_request(", "obtaining an access token will not supply a realm and", "functools import wraps from your_validator import your_validator from oauthlib.oauth1 import", "as a scope or realm to which the # client", "access to a specific realm. # This first step (obtaining", "valid_signature = self._check_signature(request) # We delay checking validity until the", "valid_client: request.client_key = self.request_validator.dummy_client # The server SHOULD return a", "# We delay checking validity until the very end, using", "request token if valid. :param uri: The full URI of", "# and can then be identified by checking the require_resource_owner", "= self._create_request(uri, http_method, body, headers) except errors.OAuth1Error: return False, None", "request = self._create_request(uri, http_method, body, headers) except errors.OAuth1Error: return False,", "a new request token if valid. :param uri: The full", "access to that scope or realm. # .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2", "code when # receiving a request with invalid or expired", "v = all((valid_client, valid_resource_owner, valid_realm, valid_signature)) if not v: log.info(\"[Failure]", "r = provider.validate_protected_resource_request( request.url, http_method=request.method, body=request.data, headers=request.headers, realms=realms or [])", "the flow of every # request remains almost identical regardless", "this is the first step in the OAuth # workflow", "or realm to which the # client has access and", "scope or realm. # .. 
_`realm`: http://tools.ietf.org/html/rfc2617#section-1.2 # # Note", "This will be supplied to the ``validate_realms`` method of the", ":param body: The request body as a string. :param headers:", "assigned and used to maintain near constant # time request", "that early exit would enable client realm access enumeration. #", "of the view. If invalid create and return an error", "5849. \"\"\" from __future__ import absolute_import, unicode_literals from oauthlib.common import", "realms=realms) valid_signature = self._check_signature(request) # We delay checking validity until", "i.e. GET, POST, PUT, HEAD, etc. :param body: The request", "need not require a realm # and can then be", "not provided by # the client. valid_realm = self.request_validator.validate_realms(request.client_key, request.resource_owner_key,", "The full URI of the token request. :param http_method: A", "be supplied to the ``validate_realms`` method of the request validator.", "access token and not provided by # the client. valid_realm", "for protecting resources. Typical use is to instantiate with a", "object. \"\"\" try: request = self._create_request(uri, http_method, body, headers) except", "will # not be checked. Instead the previously requested realm", "token response, with a new request token if valid. :param", "# The server SHOULD return a 401 (Unauthorized) status code", "headers=None, realms=None): \"\"\"Create a request token response, with a new", "to ensure it is authorized access to that scope or", "and can then be identified by checking the require_resource_owner #", "if not v: log.info(\"[Failure] request verification failed.\") log.info(\"Valid client: %s\",", "from .. import errors class ResourceEndpoint(BaseEndpoint): \"\"\"An endpoint responsible for", "realms: A list of realms the resource is protected under.", "Note that `realm`_ is only used in authorization headers and", "request) if not valid_resource_owner: request.resource_owner_key = self.request_validator.dummy_access_token # Note that", "client enumeration valid_client = self.request_validator.validate_client_key( request.client_key, request) if not valid_client:", "attacks, instead # a dummy client is assigned and used", "such every client should be checked # to ensure it", "Clients obtaining an access token will not supply a realm", "checking the require_resource_owner # flag and abscence of realm. #", "not included in the OAuth spec. # However they could", "been supplied. This ensures near constant time execution and #", "except errors.OAuth1Error: return False, request if not request.resource_owner_key: return False,", "should be checked # to ensure it is authorized access", "constant time execution and # prevents malicious users from guessing", "instantiate with a request validator and invoke the ``validate_protected_resource_request`` in", "request validator. :returns: A tuple of 2 elements. 1. True", "# # Note that early exit would enable resource owner", "**kwargs) else: return abort(403) \"\"\" def validate_protected_resource_request(self, uri, http_method='GET', body=None,", "http_method='GET', body=None, headers=None, realms=None): \"\"\"Create a request token response, with", "# However they could be seen as a scope or", "timing attacks, instead # a dummy client is assigned and", "be seen as a scope or realm to which the", "token: %s\", valid_resource_owner) log.info(\"Valid realm: %s\", valid_realm) log.info(\"Valid signature: %s\",", "method of the request validator. :returns: A tuple of 2", "of 2 elements. 1. 
True if valid, False otherwise. 2.", "a view function. If the request is valid, invoke and", "values # have been supplied. This ensures near constant time", "directly from the decorator. See :doc:`/oauth1/validator` for details on which", "to that scope or realm. # .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2 #", "to implement for this endpoint. An example decorator:: from functools", "log.info(\"Valid realm: %s\", valid_realm) log.info(\"Valid signature: %s\", valid_signature) return v,", "return False, request if not self.request_validator.validate_timestamp_and_nonce( request.client_key, request.timestamp, request.nonce, request,", "# client has access and as such every client should", "of whether valid values # have been supplied. This ensures", "with invalid client credentials. # Note: This is postponed in", "until the very end, using dummy values for # calculations", "order to avoid timing attacks, instead # a dummy token", "flag and abscence of realm. # # Clients obtaining an", "We delay checking validity until the very end, using dummy", "when # receiving a request with invalid client credentials. #", "realm but note # that the realm is now tied", "enable resource owner enumeration valid_resource_owner = self.request_validator.validate_access_token( request.client_key, request.resource_owner_key, request)", "oauthlib.common import log from .base import BaseEndpoint from .. import", "a request with invalid client credentials. # Note: This is", "implement for this endpoint. An example decorator:: from functools import", "and as such every client should be checked # to", "otherwise. 2. An oauthlib.common.Request object. \"\"\" try: request = self._create_request(uri,", "`realm`_ is only used in authorization headers and how #", "realm. # .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2 # # Note that early", "self._check_signature(request) # We delay checking validity until the very end,", "# receiving a request with invalid client credentials. # Note:", "that early exit would enable client enumeration valid_client = self.request_validator.validate_client_key(", "valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc. :param", "create and return an error response directly from the decorator.", "used in authorization headers and how # it should be", "indicates this is the first step in the OAuth #", "body=None, headers=None, realms=None): \"\"\"Create a request token response, with a", "validity until the very end, using dummy values for #", "request.resource_owner_key = self.request_validator.dummy_access_token # Note that `realm`_ is only used", "transferred from the request token to the access token. #", "If invalid create and return an error response directly from", "to the access token and not provided by # the", "not request.resource_owner_key: return False, request if not self.request_validator.check_access_token( request.resource_owner_key): return", "# Access to protected resources will always validate the realm", "the require_resource_owner # flag and abscence of realm. # #", "client should be checked # to ensure it is authorized", "remains almost identical regardless of whether valid values # have", "and return the response of the view. If invalid create", "is protected under. This will be supplied to the ``validate_realms``", "view function. 
If the request is valid, invoke and return", "utf-8 -*- \"\"\" oauthlib.oauth1.rfc5849.endpoints.resource ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module is an implementation", "import wraps from your_validator import your_validator from oauthlib.oauth1 import ResourceEndpoint", "headers) except errors.OAuth1Error: return False, None try: self._check_transport_security(request) self._check_mandatory_parameters(request) except", "import your_validator from oauthlib.oauth1 import ResourceEndpoint endpoint = ResourceEndpoint(your_validator) def", "client credentials. # Note: This is postponed in order to", "authorized access to that scope or realm. # .. _`realm`:", "log.info(\"[Failure] request verification failed.\") log.info(\"Valid client: %s\", valid_client) log.info(\"Valid token:", "= self.request_validator.validate_access_token( request.client_key, request.resource_owner_key, request) if not valid_resource_owner: request.resource_owner_key =", "ensures near constant time execution and # prevents malicious users", "import ResourceEndpoint endpoint = ResourceEndpoint(your_validator) def require_oauth(realms=None): def decorator(f): @wraps(f)", "(Unauthorized) status code when # receiving a request with invalid", "invoke the ``validate_protected_resource_request`` in a decorator around a view function.", "and return an error response directly from the decorator. See", "v: return f(*args, **kwargs) else: return abort(403) \"\"\" def validate_protected_resource_request(self,", "the very end, using dummy values for # calculations and", "log.info(\"Valid token: %s\", valid_resource_owner) log.info(\"Valid realm: %s\", valid_realm) log.info(\"Valid signature:", "is to instantiate with a request validator and invoke the", "attacks, instead # a dummy token is assigned and used", "a realm and it will # not be checked. Instead", "would enable client enumeration valid_client = self.request_validator.validate_client_key( request.client_key, request) if", "= self.request_validator.validate_client_key( request.client_key, request) if not valid_client: request.client_key = self.request_validator.dummy_client", "of OAuth 1.0 RFC 5849. \"\"\" from __future__ import absolute_import,", "or []) if v: return f(*args, **kwargs) else: return abort(403)", "# a dummy token is assigned and used to maintain", "BaseEndpoint from .. import errors class ResourceEndpoint(BaseEndpoint): \"\"\"An endpoint responsible", "the OAuth # workflow where a client requests access to", "protection provider logic of OAuth 1.0 RFC 5849. \"\"\" from", "wrapper(request, *args, **kwargs): v, r = provider.validate_protected_resource_request( request.url, http_method=request.method, body=request.data,", "verification. # # Note that early exit would enable resource", "checked # to ensure it is authorized access to that", "POST, PUT, HEAD, etc. :param body: The request body as", "to a specific realm. # This first step (obtaining request", "decorator:: from functools import wraps from your_validator import your_validator from", "if not valid_resource_owner: request.resource_owner_key = self.request_validator.dummy_access_token # Note that `realm`_", "def wrapper(request, *args, **kwargs): v, r = provider.validate_protected_resource_request( request.url, http_method=request.method,", "requests access to a specific realm. # This first step", "request.timestamp, request.nonce, request, access_token=request.resource_owner_key): return False, request # The server", "enable client realm access enumeration. 
# # The require_realm indicates", "enumeration. # # The require_realm indicates this is the first", "uri=request.uri, realms=realms) valid_signature = self._check_signature(request) # We delay checking validity", "not require a realm # and can then be identified", "valid_client = self.request_validator.validate_client_key( request.client_key, request) if not valid_client: request.client_key =", "all((valid_client, valid_resource_owner, valid_realm, valid_signature)) if not v: log.info(\"[Failure] request verification", "validate the realm but note # that the realm is", "not valid_client: request.client_key = self.request_validator.dummy_client # The server SHOULD return", "= ResourceEndpoint(your_validator) def require_oauth(realms=None): def decorator(f): @wraps(f) def wrapper(request, *args,", "the ``validate_realms`` method of the request validator. :returns: A tuple", "time request verification. # # Note that early exit would", "oauthlib.oauth1 import ResourceEndpoint endpoint = ResourceEndpoint(your_validator) def require_oauth(realms=None): def decorator(f):", "self.request_validator.dummy_client # The server SHOULD return a 401 (Unauthorized) status", "specific realm. # This first step (obtaining request token) need", "is an implementation of the resource protection provider logic of", "``validate_protected_resource_request`` in a decorator around a view function. If the", "the request validator. :returns: A tuple of 2 elements. 1.", "dict. :param realms: A list of realms the resource is", "headers: The request headers as a dict. :param realms: A", "expired token. # Note: This is postponed in order to", "a realm # and can then be identified by checking", "abscence of realm. # # Clients obtaining an access token", "endpoint = ResourceEndpoint(your_validator) def require_oauth(realms=None): def decorator(f): @wraps(f) def wrapper(request,", "GET, POST, PUT, HEAD, etc. :param body: The request body", "ResourceEndpoint(your_validator) def require_oauth(realms=None): def decorator(f): @wraps(f) def wrapper(request, *args, **kwargs):", "to maintain near constant # time request verification. # #", "would enable resource owner enumeration valid_resource_owner = self.request_validator.validate_access_token( request.client_key, request.resource_owner_key,", "not valid_resource_owner: request.resource_owner_key = self.request_validator.dummy_access_token # Note that `realm`_ is", "# to ensure it is authorized access to that scope", "secrets/keys to ensure the flow of every # request remains", "None try: self._check_transport_security(request) self._check_mandatory_parameters(request) except errors.OAuth1Error: return False, request if", "constant # time request verification. # # Note that early", "the resource protection provider logic of OAuth 1.0 RFC 5849.", "request, access_token=request.resource_owner_key): return False, request # The server SHOULD return", "in a decorator around a view function. If the request", "server SHOULD return a 401 (Unauthorized) status code when #", "the request is valid, invoke and return the response of", "an error response directly from the decorator. See :doc:`/oauth1/validator` for", "@wraps(f) def wrapper(request, *args, **kwargs): v, r = provider.validate_protected_resource_request( request.url,", "that scope or realm. # .. 
_`realm`: http://tools.ietf.org/html/rfc2617#section-1.2 # #", "token will not supply a realm and it will #", "request.resource_owner_key: return False, request if not self.request_validator.check_access_token( request.resource_owner_key): return False,", "False otherwise. 2. An oauthlib.common.Request object. \"\"\" try: request =", "the realm but note # that the realm is now", "from functools import wraps from your_validator import your_validator from oauthlib.oauth1", "request verification failed.\") log.info(\"Valid client: %s\", valid_client) log.info(\"Valid token: %s\",", "OAuth spec. # However they could be seen as a", "to the access token. # # Access to protected resources", "a dummy client is assigned and used to maintain near", "request.nonce, request, access_token=request.resource_owner_key): return False, request # The server SHOULD", "not supply a realm and it will # not be", "protected resources will always validate the realm but note #", "self._check_mandatory_parameters(request) except errors.OAuth1Error: return False, request if not request.resource_owner_key: return", "very end, using dummy values for # calculations and fetching", "valid_client) log.info(\"Valid token: %s\", valid_resource_owner) log.info(\"Valid realm: %s\", valid_realm) log.info(\"Valid", "the decorator. See :doc:`/oauth1/validator` for details on which validator methods", "from .base import BaseEndpoint from .. import errors class ResourceEndpoint(BaseEndpoint):", "and abscence of realm. # # Clients obtaining an access", "uri, http_method='GET', body=None, headers=None, realms=None): \"\"\"Create a request token response,", "it should be interepreted is not included in the OAuth", "If the request is valid, invoke and return the response", "the response of the view. If invalid create and return", "verification failed.\") log.info(\"Valid client: %s\", valid_client) log.info(\"Valid token: %s\", valid_resource_owner)", "token if valid. :param uri: The full URI of the", "logic of OAuth 1.0 RFC 5849. \"\"\" from __future__ import", "avoid timing attacks, instead # a dummy token is assigned", "http_method=request.method, body=request.data, headers=request.headers, realms=realms or []) if v: return f(*args,", "request.client_key, request.timestamp, request.nonce, request, access_token=request.resource_owner_key): return False, request # The", "RFC 5849. \"\"\" from __future__ import absolute_import, unicode_literals from oauthlib.common", "\"\"\" def validate_protected_resource_request(self, uri, http_method='GET', body=None, headers=None, realms=None): \"\"\"Create a", "require_oauth(realms=None): def decorator(f): @wraps(f) def wrapper(request, *args, **kwargs): v, r", "errors.OAuth1Error: return False, request if not request.resource_owner_key: return False, request", "headers as a dict. :param realms: A list of realms", "time execution and # prevents malicious users from guessing sensitive", "supplied. This ensures near constant time execution and # prevents", "realm access enumeration. 
# # The require_realm indicates this is", "fetching secrets/keys to ensure the flow of every # request", "postponed in order to avoid timing attacks, instead # a", "%s\", valid_resource_owner) log.info(\"Valid realm: %s\", valid_realm) log.info(\"Valid signature: %s\", valid_signature)", "in the OAuth # workflow where a client requests access", "the first step in the OAuth # workflow where a", "v, r = provider.validate_protected_resource_request( request.url, http_method=request.method, body=request.data, headers=request.headers, realms=realms or", "# transferred from the request token to the access token.", "by # the client. valid_realm = self.request_validator.validate_realms(request.client_key, request.resource_owner_key, request, uri=request.uri,", "as a dict. :param realms: A list of realms the", "calculations and fetching secrets/keys to ensure the flow of every", "early exit would enable resource owner enumeration valid_resource_owner = self.request_validator.validate_access_token(", "# workflow where a client requests access to a specific", "The require_realm indicates this is the first step in the", "from the decorator. See :doc:`/oauth1/validator` for details on which validator", "validator and invoke the ``validate_protected_resource_request`` in a decorator around a", ".. import errors class ResourceEndpoint(BaseEndpoint): \"\"\"An endpoint responsible for protecting", "in authorization headers and how # it should be interepreted", "then be identified by checking the require_resource_owner # flag and", "new request token if valid. :param uri: The full URI", "to ensure the flow of every # request remains almost", "(obtaining request token) need not require a realm # and", "headers and how # it should be interepreted is not", "access enumeration. # # The require_realm indicates this is the", "of the resource protection provider logic of OAuth 1.0 RFC", "your_validator import your_validator from oauthlib.oauth1 import ResourceEndpoint endpoint = ResourceEndpoint(your_validator)", "valid_resource_owner) log.info(\"Valid realm: %s\", valid_realm) log.info(\"Valid signature: %s\", valid_signature) return", "example decorator:: from functools import wraps from your_validator import your_validator", "return abort(403) \"\"\" def validate_protected_resource_request(self, uri, http_method='GET', body=None, headers=None, realms=None):", "of the request validator. :returns: A tuple of 2 elements.", "try: request = self._create_request(uri, http_method, body, headers) except errors.OAuth1Error: return", "# .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2 # # Note that early exit", "# it should be interepreted is not included in the", "for details on which validator methods to implement for this", "Instead the previously requested realm should be # transferred from", "first step (obtaining request token) need not require a realm", "always validate the realm but note # that the realm", "is now tied to the access token and not provided", "but note # that the realm is now tied to", "a request with invalid or expired token. # Note: This", "from __future__ import absolute_import, unicode_literals from oauthlib.common import log from", "for this endpoint. An example decorator:: from functools import wraps", "request remains almost identical regardless of whether valid values #", "resource is protected under. This will be supplied to the", "scope or realm to which the # client has access", "a 401 (Unauthorized) status code when # receiving a request", "the access token. 
# # Access to protected resources will", "A tuple of 2 elements. 1. True if valid, False", "ResourceEndpoint endpoint = ResourceEndpoint(your_validator) def require_oauth(realms=None): def decorator(f): @wraps(f) def", "request.resource_owner_key, request, uri=request.uri, realms=realms) valid_signature = self._check_signature(request) # We delay", "to the ``validate_realms`` method of the request validator. :returns: A", "client requests access to a specific realm. # This first", "string. :param headers: The request headers as a dict. :param", "Note that early exit would enable resource owner enumeration valid_resource_owner", "client: %s\", valid_client) log.info(\"Valid token: %s\", valid_resource_owner) log.info(\"Valid realm: %s\",", "every # request remains almost identical regardless of whether valid", "The request headers as a dict. :param realms: A list", "should be # transferred from the request token to the", "f(*args, **kwargs) else: return abort(403) \"\"\" def validate_protected_resource_request(self, uri, http_method='GET',", "Typical use is to instantiate with a request validator and", "PUT, HEAD, etc. :param body: The request body as a", "resources. Typical use is to instantiate with a request validator", "elements. 1. True if valid, False otherwise. 2. An oauthlib.common.Request", "realms=realms or []) if v: return f(*args, **kwargs) else: return", ":param uri: The full URI of the token request. :param", "is valid, invoke and return the response of the view.", "valid_realm, valid_signature)) if not v: log.info(\"[Failure] request verification failed.\") log.info(\"Valid", "if not request.resource_owner_key: return False, request if not self.request_validator.check_access_token( request.resource_owner_key):", "False, request if not request.resource_owner_key: return False, request if not", "protecting resources. Typical use is to instantiate with a request", ".base import BaseEndpoint from .. import errors class ResourceEndpoint(BaseEndpoint): \"\"\"An", "be checked # to ensure it is authorized access to", "realm: %s\", valid_realm) log.info(\"Valid signature: %s\", valid_signature) return v, request", "status code when # receiving a request with invalid or", "prevents malicious users from guessing sensitive information v = all((valid_client,", "errors.OAuth1Error: return False, None try: self._check_transport_security(request) self._check_mandatory_parameters(request) except errors.OAuth1Error: return", "with a new request token if valid. :param uri: The", "body, headers) except errors.OAuth1Error: return False, None try: self._check_transport_security(request) self._check_mandatory_parameters(request)", "first step in the OAuth # workflow where a client", "However they could be seen as a scope or realm", "request if not request.resource_owner_key: return False, request if not self.request_validator.check_access_token(", "guessing sensitive information v = all((valid_client, valid_resource_owner, valid_realm, valid_signature)) if", "sensitive information v = all((valid_client, valid_resource_owner, valid_realm, valid_signature)) if not", "early exit would enable client realm access enumeration. # #", "receiving a request with invalid or expired token. # Note:", "# Note that early exit would enable client enumeration valid_client", "can then be identified by checking the require_resource_owner # flag", "verification. # # Note that early exit would enable client", "oauthlib.common.Request object. 
\"\"\" try: request = self._create_request(uri, http_method, body, headers)", "tuple of 2 elements. 1. True if valid, False otherwise.", "the token request. :param http_method: A valid HTTP verb, i.e.", "the ``validate_protected_resource_request`` in a decorator around a view function. If", "request.resource_owner_key, request) if not valid_resource_owner: request.resource_owner_key = self.request_validator.dummy_access_token # Note", "realm # and can then be identified by checking the", "if not valid_client: request.client_key = self.request_validator.dummy_client # The server SHOULD", "enumeration valid_client = self.request_validator.validate_client_key( request.client_key, request) if not valid_client: request.client_key", "of realms the resource is protected under. This will be", "self.request_validator.validate_access_token( request.client_key, request.resource_owner_key, request) if not valid_resource_owner: request.resource_owner_key = self.request_validator.dummy_access_token", "valid values # have been supplied. This ensures near constant", "methods to implement for this endpoint. An example decorator:: from", "import absolute_import, unicode_literals from oauthlib.common import log from .base import", "as a string. :param headers: The request headers as a", "A list of realms the resource is protected under. This", "the previously requested realm should be # transferred from the", "malicious users from guessing sensitive information v = all((valid_client, valid_resource_owner,", "= all((valid_client, valid_resource_owner, valid_realm, valid_signature)) if not v: log.info(\"[Failure] request", "realm and it will # not be checked. Instead the", "def require_oauth(realms=None): def decorator(f): @wraps(f) def wrapper(request, *args, **kwargs): v,", "access token. # # Access to protected resources will always", "Note: This is postponed in order to avoid timing attacks,", "A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.", "= provider.validate_protected_resource_request( request.url, http_method=request.method, body=request.data, headers=request.headers, realms=realms or []) if", "valid_realm = self.request_validator.validate_realms(request.client_key, request.resource_owner_key, request, uri=request.uri, realms=realms) valid_signature = self._check_signature(request)", "valid, invoke and return the response of the view. If", "details on which validator methods to implement for this endpoint.", "uri: The full URI of the token request. :param http_method:", "note # that the realm is now tied to the", "request headers as a dict. :param realms: A list of", "validate_protected_resource_request(self, uri, http_method='GET', body=None, headers=None, realms=None): \"\"\"Create a request token", "# a dummy client is assigned and used to maintain", "in order to avoid timing attacks, instead # a dummy", "OAuth # workflow where a client requests access to a", "# the client. valid_realm = self.request_validator.validate_realms(request.client_key, request.resource_owner_key, request, uri=request.uri, realms=realms)", "identical regardless of whether valid values # have been supplied.", "request token response, with a new request token if valid.", "request. :param http_method: A valid HTTP verb, i.e. GET, POST,", "invalid client credentials. 
# Note: This is postponed in order", "This module is an implementation of the resource protection provider", "every client should be checked # to ensure it is", "information v = all((valid_client, valid_resource_owner, valid_realm, valid_signature)) if not v:", "body=request.data, headers=request.headers, realms=realms or []) if v: return f(*args, **kwargs)", "response of the view. If invalid create and return an", "http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD,", "valid_resource_owner = self.request_validator.validate_access_token( request.client_key, request.resource_owner_key, request) if not valid_resource_owner: request.resource_owner_key", "which the # client has access and as such every", "import log from .base import BaseEndpoint from .. import errors", "except errors.OAuth1Error: return False, None try: self._check_transport_security(request) self._check_mandatory_parameters(request) except errors.OAuth1Error:", "if valid. :param uri: The full URI of the token", "return False, request # The server SHOULD return a 401", "class ResourceEndpoint(BaseEndpoint): \"\"\"An endpoint responsible for protecting resources. Typical use", "False, request # The server SHOULD return a 401 (Unauthorized)", "and # prevents malicious users from guessing sensitive information v", "from guessing sensitive information v = all((valid_client, valid_resource_owner, valid_realm, valid_signature))", "almost identical regardless of whether valid values # have been", "log.info(\"Valid client: %s\", valid_client) log.info(\"Valid token: %s\", valid_resource_owner) log.info(\"Valid realm:", "that early exit would enable resource owner enumeration valid_resource_owner =", "validator methods to implement for this endpoint. An example decorator::", "module is an implementation of the resource protection provider logic", "token is assigned and used to maintain near constant #", "on which validator methods to implement for this endpoint. An", "SHOULD return a 401 (Unauthorized) status code when # receiving", "use is to instantiate with a request validator and invoke", "identified by checking the require_resource_owner # flag and abscence of", "access and as such every client should be checked #", "exit would enable client realm access enumeration. # # The", "client is assigned and used to maintain near constant #", "request with invalid or expired token. # Note: This is", "is the first step in the OAuth # workflow where", "be checked. Instead the previously requested realm should be #", "# Note that early exit would enable resource owner enumeration", "token request. :param http_method: A valid HTTP verb, i.e. GET,", "of every # request remains almost identical regardless of whether", "when # receiving a request with invalid or expired token.", "timing attacks, instead # a dummy token is assigned and", "order to avoid timing attacks, instead # a dummy client", "a dummy token is assigned and used to maintain near", "True if valid, False otherwise. 2. An oauthlib.common.Request object. \"\"\"", "in the OAuth spec. # However they could be seen", "request verification. # # Note that early exit would enable", "2 elements. 1. True if valid, False otherwise. 2. An", "the # client has access and as such every client", "from oauthlib.common import log from .base import BaseEndpoint from ..", "# that the realm is now tied to the access", "dummy values for # calculations and fetching secrets/keys to ensure", "require_realm indicates this is the first step in the OAuth", "a specific realm. 
# This first step (obtaining request token)", "included in the OAuth spec. # However they could be", "oauthlib.oauth1.rfc5849.endpoints.resource ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module is an implementation of the resource", "enable client enumeration valid_client = self.request_validator.validate_client_key( request.client_key, request) if not", "endpoint. An example decorator:: from functools import wraps from your_validator", "requested realm should be # transferred from the request token", "False, None try: self._check_transport_security(request) self._check_mandatory_parameters(request) except errors.OAuth1Error: return False, request", "could be seen as a scope or realm to which", "log from .base import BaseEndpoint from .. import errors class", "Note that early exit would enable client realm access enumeration.", "_`realm`: http://tools.ietf.org/html/rfc2617#section-1.2 # # Note that early exit would enable", "URI of the token request. :param http_method: A valid HTTP", "401 (Unauthorized) status code when # receiving a request with", "be interepreted is not included in the OAuth spec. #", "invalid or expired token. # Note: This is postponed in", "near constant time execution and # prevents malicious users from", "if not self.request_validator.check_access_token( request.resource_owner_key): return False, request if not self.request_validator.validate_timestamp_and_nonce(", "\"\"\"An endpoint responsible for protecting resources. Typical use is to", "False, request if not self.request_validator.validate_timestamp_and_nonce( request.client_key, request.timestamp, request.nonce, request, access_token=request.resource_owner_key):" ]
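# --- Added illustration (not part of the original module) ---
# A minimal, self-contained sketch of the "validate everything, decide at the
# end" pattern the comments above describe: all checks are computed up front
# and only their conjunction is revealed, so response timing does not leak
# which credential was wrong. All names below are hypothetical.
def constant_time_style_verification(checks):
    # `checks` is a list of already-computed booleans; no check is skipped
    # just because an earlier one failed.
    return all(checks)

# Both calls did the same amount of validation work before answering:
assert constant_time_style_verification([True, True, True, True]) is True
assert constant_time_style_verification([False, True, True, True]) is False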
[ "width height, width = image.shape[:2] # retrieve the params of", "translation matrix T = np.float32([[1, 0, col_trans], [0, 1, row_trans]])", "onion_img.dtype # This will give you: uint8 def translation(img, trans):", "100 cols to the right translation(\"onion.png\", (50, 100)) # remove", "(50, 100)) # remove the peper from the image using", "requirements # from requirements.txt # #################################################### import numpy as np", "(0, 20)) # translation 50 lines and 100 cols to", "images cv.imshow(\"Original Image\", image) cv.imshow('Translation Image', img_translation) # Don't destroy", "# translation 20 pixel to the right translation(\"onion.png\", (0, 20))", "tuple (row_trans, col_trans) \"\"\" # read the image image =", "of translation row_trans, col_trans = trans # Create the translation", "= np.float32([[1, 0, col_trans], [0, 1, row_trans]]) # Apply the", "translation(\"onion.png\", (50, 100)) # remove the peper from the image", "translation(\"onion.png\", (0, 20)) # translation 50 lines and 100 cols", "trans # Create the translation matrix T = np.float32([[1, 0,", "# #################################################### import numpy as np import cv2 as cv", "cv.warpAffine(image, T, (width, height)) # show the images cv.imshow(\"Original Image\",", "load the image onion_img = cv.imread(\"onion.png\") # Store height and", "(row_trans, col_trans) \"\"\" # read the image image = cv.imread(img)", "cv.imread(img) # retrieve the height and the width height, width", "cv2 as cv import matplotlib.pyplot as plt # load the", "100)) # remove the peper from the image using translations", "args: - img: absolute path to the image - trans:", "path to the image - trans: must be a tuple", "# # @ Authors : <NAME> # <NAME> # #", "and channels of the image row, col, chs = onion_img.shape", "a tuple (row_trans, col_trans) \"\"\" # read the image image", "col_trans], [0, 1, row_trans]]) # Apply the T matrix: T*M", "the T matrix: T*M img_translation = cv.warpAffine(image, T, (width, height))", "the image - trans: must be a tuple (row_trans, col_trans)", "col, chs = onion_img.shape # Store the spectral resolution dtype_img", "right translation(\"onion.png\", (50, 100)) # remove the peper from the", "np import cv2 as cv import matplotlib.pyplot as plt #", "the spectral resolution dtype_img = onion_img.dtype # This will give", "uint8 def translation(img, trans): \"\"\" args: - img: absolute path", "as plt # load the image onion_img = cv.imread(\"onion.png\") #", "- trans: must be a tuple (row_trans, col_trans) \"\"\" #", "= cv.imread(img) # retrieve the height and the width height,", "Apply the T matrix: T*M img_translation = cv.warpAffine(image, T, (width,", "and 100 cols to the right translation(\"onion.png\", (50, 100)) #", "channels of the image row, col, chs = onion_img.shape #", "# @ Authors : <NAME> # <NAME> # # @", "# retrieve the height and the width height, width =", "matplotlib.pyplot as plt # load the image onion_img = cv.imread(\"onion.png\")", "cols to the right translation(\"onion.png\", (50, 100)) # remove the", ": <NAME> # <NAME> # # @ Hint: you have", "# # @ Hint: you have to install all requirements", "height and width and channels of the image row, col,", "image image = cv.imread(img) # retrieve the height and the", "Image\", image) cv.imshow('Translation Image', img_translation) # Don't destroy the images", "give you: uint8 def translation(img, trans): \"\"\" args: - img:", "plt # load the image onion_img = cv.imread(\"onion.png\") # Store", "show the images 
cv.imshow(\"Original Image\", image) cv.imshow('Translation Image', img_translation) #", "images until the user do cv.waitKey() cv.destroyAllWindows() # translation 20", "= onion_img.dtype # This will give you: uint8 def translation(img,", "np.float32([[1, 0, col_trans], [0, 1, row_trans]]) # Apply the T", "height and the width height, width = image.shape[:2] # retrieve", "= image.shape[:2] # retrieve the params of translation row_trans, col_trans", "0, col_trans], [0, 1, row_trans]]) # Apply the T matrix:", "will give you: uint8 def translation(img, trans): \"\"\" args: -", "row_trans, col_trans = trans # Create the translation matrix T", "retrieve the params of translation row_trans, col_trans = trans #", "to the right translation(\"onion.png\", (50, 100)) # remove the peper", "onion_img.shape # Store the spectral resolution dtype_img = onion_img.dtype #", "absolute path to the image - trans: must be a", "cv.imshow(\"Original Image\", image) cv.imshow('Translation Image', img_translation) # Don't destroy the", "until the user do cv.waitKey() cv.destroyAllWindows() # translation 20 pixel", "the right translation(\"onion.png\", (0, 20)) # translation 50 lines and", "Image', img_translation) # Don't destroy the images until the user", "20 pixel to the right translation(\"onion.png\", (0, 20)) # translation", "img_translation = cv.warpAffine(image, T, (width, height)) # show the images", "translation 20 pixel to the right translation(\"onion.png\", (0, 20)) #", "to the image - trans: must be a tuple (row_trans,", "# read the image image = cv.imread(img) # retrieve the", "# retrieve the params of translation row_trans, col_trans = trans", "params of translation row_trans, col_trans = trans # Create the", "pixel to the right translation(\"onion.png\", (0, 20)) # translation 50", "# show the images cv.imshow(\"Original Image\", image) cv.imshow('Translation Image', img_translation)", "image) cv.imshow('Translation Image', img_translation) # Don't destroy the images until", "resolution dtype_img = onion_img.dtype # This will give you: uint8", "import cv2 as cv import matplotlib.pyplot as plt # load", "# Store the spectral resolution dtype_img = onion_img.dtype # This", "and width and channels of the image row, col, chs", "read the image image = cv.imread(img) # retrieve the height", "# translation 50 lines and 100 cols to the right", "user do cv.waitKey() cv.destroyAllWindows() # translation 20 pixel to the", "row_trans]]) # Apply the T matrix: T*M img_translation = cv.warpAffine(image,", "# from requirements.txt # #################################################### import numpy as np import", "#################################################### import numpy as np import cv2 as cv import", "20)) # translation 50 lines and 100 cols to the", "image row, col, chs = onion_img.shape # Store the spectral", "\"\"\" # read the image image = cv.imread(img) # retrieve", "the image image = cv.imread(img) # retrieve the height and", "remove the peper from the image using translations translation(\"onion.png\", (40,", "trans: must be a tuple (row_trans, col_trans) \"\"\" # read", "and the width height, width = image.shape[:2] # retrieve the", "destroy the images until the user do cv.waitKey() cv.destroyAllWindows() #", "image.shape[:2] # retrieve the params of translation row_trans, col_trans =", "do cv.waitKey() cv.destroyAllWindows() # translation 20 pixel to the right", "# Store height and width and channels of the image", "Hint: you have to install all requirements # from requirements.txt", "[0, 1, 
row_trans]]) # Apply the T matrix: T*M img_translation", "width and channels of the image row, col, chs =", "the width height, width = image.shape[:2] # retrieve the params", "@ Authors : <NAME> # <NAME> # # @ Hint:", "to install all requirements # from requirements.txt # #################################################### import", "as np import cv2 as cv import matplotlib.pyplot as plt", "1, row_trans]]) # Apply the T matrix: T*M img_translation =", "have to install all requirements # from requirements.txt # ####################################################", "Authors : <NAME> # <NAME> # # @ Hint: you", "T matrix: T*M img_translation = cv.warpAffine(image, T, (width, height)) #", "right translation(\"onion.png\", (0, 20)) # translation 50 lines and 100", "<filename>python/ex_1.py #################################################### # # @ Authors : <NAME> # <NAME>", "chs = onion_img.shape # Store the spectral resolution dtype_img =", "the params of translation row_trans, col_trans = trans # Create", "# @ Hint: you have to install all requirements #", "Create the translation matrix T = np.float32([[1, 0, col_trans], [0,", "= cv.imread(\"onion.png\") # Store height and width and channels of", "cv.destroyAllWindows() # translation 20 pixel to the right translation(\"onion.png\", (0,", "as cv import matplotlib.pyplot as plt # load the image", "(width, height)) # show the images cv.imshow(\"Original Image\", image) cv.imshow('Translation", "= cv.warpAffine(image, T, (width, height)) # show the images cv.imshow(\"Original", "50 lines and 100 cols to the right translation(\"onion.png\", (50,", "trans): \"\"\" args: - img: absolute path to the image", "numpy as np import cv2 as cv import matplotlib.pyplot as", "- img: absolute path to the image - trans: must", "the image onion_img = cv.imread(\"onion.png\") # Store height and width", "spectral resolution dtype_img = onion_img.dtype # This will give you:", "= onion_img.shape # Store the spectral resolution dtype_img = onion_img.dtype", "image - trans: must be a tuple (row_trans, col_trans) \"\"\"", "Store height and width and channels of the image row,", "# load the image onion_img = cv.imread(\"onion.png\") # Store height", "of the image row, col, chs = onion_img.shape # Store", "# remove the peper from the image using translations translation(\"onion.png\",", "#################################################### # # @ Authors : <NAME> # <NAME> #", "# <NAME> # # @ Hint: you have to install", "<NAME> # <NAME> # # @ Hint: you have to", "T, (width, height)) # show the images cv.imshow(\"Original Image\", image)", "# Create the translation matrix T = np.float32([[1, 0, col_trans],", "width = image.shape[:2] # retrieve the params of translation row_trans,", "= trans # Create the translation matrix T = np.float32([[1,", "the user do cv.waitKey() cv.destroyAllWindows() # translation 20 pixel to", "the right translation(\"onion.png\", (50, 100)) # remove the peper from", "the height and the width height, width = image.shape[:2] #", "requirements.txt # #################################################### import numpy as np import cv2 as", "# Apply the T matrix: T*M img_translation = cv.warpAffine(image, T,", "img_translation) # Don't destroy the images until the user do", "row, col, chs = onion_img.shape # Store the spectral resolution", "cv.imread(\"onion.png\") # Store height and width and channels of the", "def translation(img, trans): \"\"\" args: - img: absolute path to", "T = np.float32([[1, 0, col_trans], [0, 1, row_trans]]) # Apply", "Store the 
spectral resolution dtype_img = onion_img.dtype # This will", "retrieve the height and the width height, width = image.shape[:2]", "col_trans = trans # Create the translation matrix T =", "@ Hint: you have to install all requirements # from", "cv import matplotlib.pyplot as plt # load the image onion_img", "image = cv.imread(img) # retrieve the height and the width", "<NAME> # # @ Hint: you have to install all", "\"\"\" args: - img: absolute path to the image -", "install all requirements # from requirements.txt # #################################################### import numpy", "the images cv.imshow(\"Original Image\", image) cv.imshow('Translation Image', img_translation) # Don't", "translation 50 lines and 100 cols to the right translation(\"onion.png\",", "# This will give you: uint8 def translation(img, trans): \"\"\"", "the translation matrix T = np.float32([[1, 0, col_trans], [0, 1,", "the image row, col, chs = onion_img.shape # Store the", "# Don't destroy the images until the user do cv.waitKey()", "dtype_img = onion_img.dtype # This will give you: uint8 def", "you have to install all requirements # from requirements.txt #", "must be a tuple (row_trans, col_trans) \"\"\" # read the", "all requirements # from requirements.txt # #################################################### import numpy as", "height, width = image.shape[:2] # retrieve the params of translation", "the images until the user do cv.waitKey() cv.destroyAllWindows() # translation", "cv.imshow('Translation Image', img_translation) # Don't destroy the images until the", "T*M img_translation = cv.warpAffine(image, T, (width, height)) # show the", "to the right translation(\"onion.png\", (0, 20)) # translation 50 lines", "onion_img = cv.imread(\"onion.png\") # Store height and width and channels", "translation(img, trans): \"\"\" args: - img: absolute path to the", "img: absolute path to the image - trans: must be", "matrix T = np.float32([[1, 0, col_trans], [0, 1, row_trans]]) #", "This will give you: uint8 def translation(img, trans): \"\"\" args:", "height)) # show the images cv.imshow(\"Original Image\", image) cv.imshow('Translation Image',", "from requirements.txt # #################################################### import numpy as np import cv2", "Don't destroy the images until the user do cv.waitKey() cv.destroyAllWindows()", "image onion_img = cv.imread(\"onion.png\") # Store height and width and", "the peper from the image using translations translation(\"onion.png\", (40, 40))", "cv.waitKey() cv.destroyAllWindows() # translation 20 pixel to the right translation(\"onion.png\",", "lines and 100 cols to the right translation(\"onion.png\", (50, 100))", "col_trans) \"\"\" # read the image image = cv.imread(img) #", "matrix: T*M img_translation = cv.warpAffine(image, T, (width, height)) # show", "you: uint8 def translation(img, trans): \"\"\" args: - img: absolute", "translation row_trans, col_trans = trans # Create the translation matrix", "be a tuple (row_trans, col_trans) \"\"\" # read the image", "import numpy as np import cv2 as cv import matplotlib.pyplot", "import matplotlib.pyplot as plt # load the image onion_img =" ]
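# --- Added illustration (not part of the original script) ---
# What the 2x3 matrix given to cv.warpAffine does: each pixel (x, y) maps to
# (x + col_trans, y + row_trans). The check below applies the same matrix by
# hand to one homogeneous point; the values are arbitrary demo numbers.
T_demo = np.float32([[1, 0, 100], [0, 1, 50]])   # col_trans=100, row_trans=50
pt = np.array([10.0, 20.0, 1.0])                 # pixel (x=10, y=20), homogeneous form
print(T_demo @ pt)                               # -> [110.  70.]: the translated (x, y)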
[ "ctx_item['box'] for proposal in ranked_proposals: if calculate_iou(ctx_box, proposal['box']) > self.threshold:", "in ranked_proposals: if calculate_iou(ctx_box, proposal['box']) > self.threshold: hit_num += 1", "from utils.misc import calculate_iou, xywh_to_xyxy __all__ = ['NewHitRateEvaluator', 'CtxHitRateEvaluator'] class", "return proposal_per_ref, hit_rate class CtxHitRateEvaluator: def __init__(self, refer, ctxdb, top_N=None,", "num_hit = 0 num_proposal = 0 num_ref = 0 #", "0: continue # Get proposals if image_as_key: image_id = self.refer.sentToRef[exp_id]['image_id']", "self.refer.getRefIds(split=split): ref = self.refer.Refs[ref_id] image_id = ref['image_id'] ann_id = ref['ann_id']", "rate of proposals. \"\"\" # Initialize counters num_hit = 0", "+= 1 proposal_per_ref = num_proposal / num_ref hit_rate = num_hit", "in self.ctxdb[split].items(): exp_id = int(exp_id) if len(ctx['ctx']) == 0: continue", "eval_hit_rate(self, split, proposal_dict, image_as_key=False): \"\"\"Evaluate refexp-based hit rate. Args: proposal_dict:", "ref = self.refer.Refs[ref_id] image_id = ref['image_id'] ann_id = ref['ann_id'] ann", "proposals per refexp. hit_rate: Refexp-based hit rate of proposals. \"\"\"", "counters num_hit = 0 num_proposal = 0 num_ref = 0", "Refexp-based hit rate of proposals. \"\"\" # Initialize counters recall_list", "rate of proposals. \"\"\" # Initialize counters recall_list = []", "image_id = self.refer.sentToRef[exp_id]['image_id'] proposals = proposal_dict[image_id] else: proposals = proposal_dict[exp_id]", "key, default `False`. Returns: proposal_per_ref: Number of proposals per refexp.", "self.threshold: hit_num += 1 break recall_list.append(hit_num / ctx_num) avg_num_list.append(len(ranked_proposals)) return", "ctx_item in ctx['ctx']: ctx_num += 1 ctx_box = ctx_item['box'] for", "= top_N self.threshold = threshold def eval_hit_rate(self, split, proposal_dict, image_as_key=False):", "Dataset split to evaluate on. top_N: Select top-N scoring proposals", "image_id = ref['image_id'] ann_id = ref['ann_id'] ann = self.refer.Anns[ann_id] gt_box", "[] for exp_id, ctx in self.ctxdb[split].items(): exp_id = int(exp_id) if", "default `False`. Returns: proposal_per_ref: Number of proposals per refexp. hit_rate:", "['NewHitRateEvaluator', 'CtxHitRateEvaluator'] class NewHitRateEvaluator: def __init__(self, refer, top_N=None, threshold=0.5): \"\"\"Evaluate", "on. top_N: Select top-N scoring proposals to evaluate. `None` means", "rate. Args: proposal_dict: {exp_id or image_id: [{box: [4,], score: float}]}.", "refer self.ctxdb = ctxdb self.top_N = top_N self.threshold = threshold", "self.refer.sentToRef[exp_id]['image_id'] proposals = proposal_dict[image_id] else: proposals = proposal_dict[exp_id] # Rank", "1 ctx_box = ctx_item['box'] for proposal in ranked_proposals: if calculate_iou(ctx_box,", "1 proposal_per_ref = num_proposal / num_ref hit_rate = num_hit /", "split: Dataset split to evaluate on. top_N: Select top-N scoring", "evaluate. `None` means no selection. Default `None`. \"\"\" self.refer =", "reverse=True)[:self.top_N] hit_num, ctx_num = 0, 0 for ctx_item in ctx['ctx']:", "NOTE: this is the number of refexp, not ref for", "self.refer = refer self.top_N = top_N self.threshold = threshold def", "of exp_id as key, default `False`. 
Returns: proposal_per_ref: Number of", "= [] avg_num_list = [] for exp_id, ctx in self.ctxdb[split].items():", "for exp_id, ctx in self.ctxdb[split].items(): exp_id = int(exp_id) if len(ctx['ctx'])", "= 0 num_proposal = 0 num_ref = 0 # NOTE:", "proposals to evaluate. `None` means no selection. Default `None`. \"\"\"", "threshold=0.5): \"\"\"Evaluate refexp-based hit rate. Args: refdb: `refdb` dict. split:", "reverse=True)[:self.top_N] for proposal in ranked_proposals: if calculate_iou(gt_box, proposal['box']) > self.threshold:", "gt_box = xywh_to_xyxy(ann['bbox']) for exp_id in ref['sent_ids']: # Get proposals", "num_ref hit_rate = num_hit / num_ref return proposal_per_ref, hit_rate class", "utils.misc import calculate_iou, xywh_to_xyxy __all__ = ['NewHitRateEvaluator', 'CtxHitRateEvaluator'] class NewHitRateEvaluator:", "counters recall_list = [] avg_num_list = [] for exp_id, ctx", "/ num_ref return proposal_per_ref, hit_rate class CtxHitRateEvaluator: def __init__(self, refer,", "evaluate on. top_N: Select top-N scoring proposals to evaluate. `None`", "{exp_id or image_id: [{box: [4,], score: float}]}. image_as_key: Use image_id", "proposals. \"\"\" # Initialize counters recall_list = [] avg_num_list =", "of proposals. \"\"\" # Initialize counters num_hit = 0 num_proposal", "continue # Get proposals if image_as_key: image_id = self.refer.sentToRef[exp_id]['image_id'] proposals", "= 0, 0 for ctx_item in ctx['ctx']: ctx_num += 1", "/ ctx_num) avg_num_list.append(len(ranked_proposals)) return sum(avg_num_list) / len(avg_num_list), sum(recall_list) / len(recall_list)", "the number of refexp, not ref for ref_id in self.refer.getRefIds(split=split):", "hit_rate: Refexp-based hit rate of proposals. \"\"\" # Initialize counters", "ctxdb, top_N=None, threshold=0.5): self.refer = refer self.ctxdb = ctxdb self.top_N", "= self.refer.sentToRef[exp_id]['image_id'] proposals = proposal_dict[image_id] else: proposals = proposal_dict[exp_id] #", "__init__(self, refer, ctxdb, top_N=None, threshold=0.5): self.refer = refer self.ctxdb =", "# Initialize counters recall_list = [] avg_num_list = [] for", "self.threshold: num_hit += 1 break num_proposal += len(ranked_proposals) num_ref +=", "xywh_to_xyxy(ann['bbox']) for exp_id in ref['sent_ids']: # Get proposals if image_as_key:", "len(ctx['ctx']) == 0: continue # Get proposals if image_as_key: image_id", "num_hit += 1 break num_proposal += len(ranked_proposals) num_ref += 1", "if image_as_key: proposals = proposal_dict[image_id] else: proposals = proposal_dict[exp_id] #", "# Rank and select proposals ranked_proposals = sorted(proposals, key=lambda p:", "proposal['box']) > self.threshold: num_hit += 1 break num_proposal += len(ranked_proposals)", "ctx_num = 0, 0 for ctx_item in ctx['ctx']: ctx_num +=", "self.refer = refer self.ctxdb = ctxdb self.top_N = top_N self.threshold", "def __init__(self, refer, top_N=None, threshold=0.5): \"\"\"Evaluate refexp-based hit rate. Args:", "= self.refer.Anns[ann_id] gt_box = xywh_to_xyxy(ann['bbox']) for exp_id in ref['sent_ids']: #", "to evaluate on. top_N: Select top-N scoring proposals to evaluate.", "Args: proposal_dict: {exp_id or image_id: [{box: [4,], score: float}]}. 
image_as_key:", "image_as_key: Use image_id instead of exp_id as key, default `False`.", "ctxdb self.top_N = top_N self.threshold = threshold def eval_hit_rate(self, split,", "= ['NewHitRateEvaluator', 'CtxHitRateEvaluator'] class NewHitRateEvaluator: def __init__(self, refer, top_N=None, threshold=0.5):", "self.threshold = threshold def eval_hit_rate(self, split, proposal_dict, image_as_key=False): \"\"\"Evaluate refexp-based", "num_proposal / num_ref hit_rate = num_hit / num_ref return proposal_per_ref,", "'CtxHitRateEvaluator'] class NewHitRateEvaluator: def __init__(self, refer, top_N=None, threshold=0.5): \"\"\"Evaluate refexp-based", "refexp-based hit rate. Args: proposal_dict: {exp_id or image_id: [{box: [4,],", "avg_num_list = [] for exp_id, ctx in self.ctxdb[split].items(): exp_id =", "= num_hit / num_ref return proposal_per_ref, hit_rate class CtxHitRateEvaluator: def", "= [] for exp_id, ctx in self.ctxdb[split].items(): exp_id = int(exp_id)", "Number of proposals per refexp. hit_rate: Refexp-based hit rate of", "exp_id = int(exp_id) if len(ctx['ctx']) == 0: continue # Get", "= refer self.ctxdb = ctxdb self.top_N = top_N self.threshold =", "Initialize counters recall_list = [] avg_num_list = [] for exp_id,", "ranked_proposals: if calculate_iou(ctx_box, proposal['box']) > self.threshold: hit_num += 1 break", "proposal['box']) > self.threshold: hit_num += 1 break recall_list.append(hit_num / ctx_num)", "Args: refdb: `refdb` dict. split: Dataset split to evaluate on.", "proposal_per_ref: Number of proposals per refexp. hit_rate: Refexp-based hit rate", "refdb: `refdb` dict. split: Dataset split to evaluate on. top_N:", "0, 0 for ctx_item in ctx['ctx']: ctx_num += 1 ctx_box", "Use image_id instead of exp_id as key, default `False`. Returns:", "= self.refer.Refs[ref_id] image_id = ref['image_id'] ann_id = ref['ann_id'] ann =", "top_N=None, threshold=0.5): \"\"\"Evaluate refexp-based hit rate. Args: refdb: `refdb` dict.", "[{box: [4,], score: float}]}. image_as_key: Use image_id instead of exp_id", "num_proposal = 0 num_ref = 0 # NOTE: this is", "image_as_key=False): \"\"\"Evaluate refexp-based hit rate. Args: proposal_dict: {exp_id or image_id:", "# Initialize counters num_hit = 0 num_proposal = 0 num_ref", "or image_id: [{box: [4,], score: float}]}. image_as_key: Use image_id instead", "refer self.top_N = top_N self.threshold = threshold def eval_hit_rate(self, split,", "threshold=0.5): self.refer = refer self.ctxdb = ctxdb self.top_N = top_N", "hit_num, ctx_num = 0, 0 for ctx_item in ctx['ctx']: ctx_num", "<gh_stars>10-100 from utils.misc import calculate_iou, xywh_to_xyxy __all__ = ['NewHitRateEvaluator', 'CtxHitRateEvaluator']", "proposal_per_ref = num_proposal / num_ref hit_rate = num_hit / num_ref", "ctx['ctx']: ctx_num += 1 ctx_box = ctx_item['box'] for proposal in", "calculate_iou(ctx_box, proposal['box']) > self.threshold: hit_num += 1 break recall_list.append(hit_num /", "instead of exp_id as key, default `False`. Returns: proposal_per_ref: Number", "this is the number of refexp, not ref for ref_id", "= sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N] for proposal in ranked_proposals:", "hit rate. Args: proposal_dict: {exp_id or image_id: [{box: [4,], score:", "proposal_dict[image_id] else: proposals = proposal_dict[exp_id] # Rank and select proposals", "proposals ranked_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N] for proposal", "top-N scoring proposals to evaluate. `None` means no selection. 
Default", "def eval_hit_rate(self, split, proposal_dict, image_as_key=False): \"\"\"Evaluate refexp-based hit rate. Args:", "CtxHitRateEvaluator: def __init__(self, refer, ctxdb, top_N=None, threshold=0.5): self.refer = refer", "xywh_to_xyxy __all__ = ['NewHitRateEvaluator', 'CtxHitRateEvaluator'] class NewHitRateEvaluator: def __init__(self, refer,", "hit_rate = num_hit / num_ref return proposal_per_ref, hit_rate class CtxHitRateEvaluator:", "for ctx_item in ctx['ctx']: ctx_num += 1 ctx_box = ctx_item['box']", "selection. Default `None`. \"\"\" self.refer = refer self.top_N = top_N", "+= 1 break num_proposal += len(ranked_proposals) num_ref += 1 proposal_per_ref", "proposal_per_ref, hit_rate class CtxHitRateEvaluator: def __init__(self, refer, ctxdb, top_N=None, threshold=0.5):", "\"\"\"Evaluate refexp-based hit rate. Args: proposal_dict: {exp_id or image_id: [{box:", "/ num_ref hit_rate = num_hit / num_ref return proposal_per_ref, hit_rate", "refer, ctxdb, top_N=None, threshold=0.5): self.refer = refer self.ctxdb = ctxdb", "Initialize counters num_hit = 0 num_proposal = 0 num_ref =", "Get proposals if image_as_key: image_id = self.refer.sentToRef[exp_id]['image_id'] proposals = proposal_dict[image_id]", "exp_id in ref['sent_ids']: # Get proposals if image_as_key: proposals =", "of refexp, not ref for ref_id in self.refer.getRefIds(split=split): ref =", "key=lambda p: p['score'], reverse=True)[:self.top_N] for proposal in ranked_proposals: if calculate_iou(gt_box,", "num_proposal += len(ranked_proposals) num_ref += 1 proposal_per_ref = num_proposal /", "image_as_key: image_id = self.refer.sentToRef[exp_id]['image_id'] proposals = proposal_dict[image_id] else: proposals =", "1 break recall_list.append(hit_num / ctx_num) avg_num_list.append(len(ranked_proposals)) return sum(avg_num_list) / len(avg_num_list),", "ann_id = ref['ann_id'] ann = self.refer.Anns[ann_id] gt_box = xywh_to_xyxy(ann['bbox']) for", "score: float}]}. image_as_key: Use image_id instead of exp_id as key,", "per refexp. hit_rate: Refexp-based hit rate of proposals. \"\"\" #", "p: p['score'], reverse=True)[:self.top_N] for proposal in ranked_proposals: if calculate_iou(gt_box, proposal['box'])", "self.refer.Anns[ann_id] gt_box = xywh_to_xyxy(ann['bbox']) for exp_id in ref['sent_ids']: # Get", "select proposals ranked_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N] for", "self.refer.Refs[ref_id] image_id = ref['image_id'] ann_id = ref['ann_id'] ann = self.refer.Anns[ann_id]", "dict. split: Dataset split to evaluate on. top_N: Select top-N", "for proposal in ranked_proposals: if calculate_iou(ctx_box, proposal['box']) > self.threshold: hit_num", "proposal in ranked_proposals: if calculate_iou(ctx_box, proposal['box']) > self.threshold: hit_num +=", "ctx_num += 1 ctx_box = ctx_item['box'] for proposal in ranked_proposals:", "[4,], score: float}]}. image_as_key: Use image_id instead of exp_id as", "in self.refer.getRefIds(split=split): ref = self.refer.Refs[ref_id] image_id = ref['image_id'] ann_id =", "`None` means no selection. Default `None`. 
\"\"\" self.refer = refer", "in ref['sent_ids']: # Get proposals if image_as_key: proposals = proposal_dict[image_id]", "ranked_proposals: if calculate_iou(gt_box, proposal['box']) > self.threshold: num_hit += 1 break", "and select proposals ranked_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N]", "recall_list = [] avg_num_list = [] for exp_id, ctx in", "is the number of refexp, not ref for ref_id in", "split, proposal_dict, image_as_key=False): \"\"\"Evaluate refexp-based hit rate. Args: proposal_dict: {exp_id", "recall_list.append(hit_num / ctx_num) avg_num_list.append(len(ranked_proposals)) return sum(avg_num_list) / len(avg_num_list), sum(recall_list) /", "proposals = proposal_dict[exp_id] # Rank and select proposals ranked_proposals =", "# Get proposals if image_as_key: image_id = self.refer.sentToRef[exp_id]['image_id'] proposals =", "top_N=None, threshold=0.5): self.refer = refer self.ctxdb = ctxdb self.top_N =", "= threshold def eval_hit_rate(self, split, proposal_dict, image_as_key=False): \"\"\"Evaluate refexp-based hit", "Refexp-based hit rate of proposals. \"\"\" # Initialize counters num_hit", "hit rate of proposals. \"\"\" # Initialize counters recall_list =", "Get proposals if image_as_key: proposals = proposal_dict[image_id] else: proposals =", "refexp. hit_rate: Refexp-based hit rate of proposals. \"\"\" # Initialize", "== 0: continue # Get proposals if image_as_key: image_id =", "ranked_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N] hit_num, ctx_num =", "proposal_dict: {exp_id or image_id: [{box: [4,], score: float}]}. image_as_key: Use", "select proposals ranked_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N] hit_num,", "= 0 # NOTE: this is the number of refexp,", "ref['image_id'] ann_id = ref['ann_id'] ann = self.refer.Anns[ann_id] gt_box = xywh_to_xyxy(ann['bbox'])", "ref['ann_id'] ann = self.refer.Anns[ann_id] gt_box = xywh_to_xyxy(ann['bbox']) for exp_id in", "p['score'], reverse=True)[:self.top_N] for proposal in ranked_proposals: if calculate_iou(gt_box, proposal['box']) >", "= int(exp_id) if len(ctx['ctx']) == 0: continue # Get proposals", "p['score'], reverse=True)[:self.top_N] hit_num, ctx_num = 0, 0 for ctx_item in", "exp_id, ctx in self.ctxdb[split].items(): exp_id = int(exp_id) if len(ctx['ctx']) ==", "= ref['ann_id'] ann = self.refer.Anns[ann_id] gt_box = xywh_to_xyxy(ann['bbox']) for exp_id", "float}]}. image_as_key: Use image_id instead of exp_id as key, default", "`False`. Returns: proposal_per_ref: Number of proposals per refexp. hit_rate: Refexp-based", "0 num_ref = 0 # NOTE: this is the number", "# NOTE: this is the number of refexp, not ref", "not ref for ref_id in self.refer.getRefIds(split=split): ref = self.refer.Refs[ref_id] image_id", "self.ctxdb[split].items(): exp_id = int(exp_id) if len(ctx['ctx']) == 0: continue #", "ref for ref_id in self.refer.getRefIds(split=split): ref = self.refer.Refs[ref_id] image_id =", "num_ref = 0 # NOTE: this is the number of", "for ref_id in self.refer.getRefIds(split=split): ref = self.refer.Refs[ref_id] image_id = ref['image_id']", "Select top-N scoring proposals to evaluate. 
`None` means no selection.", "proposals = proposal_dict[image_id] else: proposals = proposal_dict[exp_id] # Rank and", "proposals ranked_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N] hit_num, ctx_num", "ref_id in self.refer.getRefIds(split=split): ref = self.refer.Refs[ref_id] image_id = ref['image_id'] ann_id", "int(exp_id) if len(ctx['ctx']) == 0: continue # Get proposals if", "to evaluate. `None` means no selection. Default `None`. \"\"\" self.refer", "break recall_list.append(hit_num / ctx_num) avg_num_list.append(len(ranked_proposals)) return sum(avg_num_list) / len(avg_num_list), sum(recall_list)", "= xywh_to_xyxy(ann['bbox']) for exp_id in ref['sent_ids']: # Get proposals if", "class CtxHitRateEvaluator: def __init__(self, refer, ctxdb, top_N=None, threshold=0.5): self.refer =", "= ctx_item['box'] for proposal in ranked_proposals: if calculate_iou(ctx_box, proposal['box']) >", "if calculate_iou(gt_box, proposal['box']) > self.threshold: num_hit += 1 break num_proposal", "of proposals. \"\"\" # Initialize counters recall_list = [] avg_num_list", "image_as_key: proposals = proposal_dict[image_id] else: proposals = proposal_dict[exp_id] # Rank", "# Get proposals if image_as_key: proposals = proposal_dict[image_id] else: proposals", "proposal in ranked_proposals: if calculate_iou(gt_box, proposal['box']) > self.threshold: num_hit +=", "of proposals per refexp. hit_rate: Refexp-based hit rate of proposals.", "+= len(ranked_proposals) num_ref += 1 proposal_per_ref = num_proposal / num_ref", "`None`. \"\"\" self.refer = refer self.top_N = top_N self.threshold =", "num_ref return proposal_per_ref, hit_rate class CtxHitRateEvaluator: def __init__(self, refer, ctxdb,", "sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N] for proposal in ranked_proposals: if", "top_N self.threshold = threshold def eval_hit_rate(self, split, proposal_dict, image_as_key=False): \"\"\"Evaluate", "refer, top_N=None, threshold=0.5): \"\"\"Evaluate refexp-based hit rate. Args: refdb: `refdb`", "means no selection. Default `None`. \"\"\" self.refer = refer self.top_N", "self.ctxdb = ctxdb self.top_N = top_N self.threshold = threshold def", "+= 1 ctx_box = ctx_item['box'] for proposal in ranked_proposals: if", "p: p['score'], reverse=True)[:self.top_N] hit_num, ctx_num = 0, 0 for ctx_item", "else: proposals = proposal_dict[exp_id] # Rank and select proposals ranked_proposals", "ctx_box = ctx_item['box'] for proposal in ranked_proposals: if calculate_iou(ctx_box, proposal['box'])", "import calculate_iou, xywh_to_xyxy __all__ = ['NewHitRateEvaluator', 'CtxHitRateEvaluator'] class NewHitRateEvaluator: def", "key=lambda p: p['score'], reverse=True)[:self.top_N] hit_num, ctx_num = 0, 0 for", "num_hit / num_ref return proposal_per_ref, hit_rate class CtxHitRateEvaluator: def __init__(self,", "break num_proposal += len(ranked_proposals) num_ref += 1 proposal_per_ref = num_proposal", "`refdb` dict. split: Dataset split to evaluate on. top_N: Select", "sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N] hit_num, ctx_num = 0, 0", "calculate_iou(gt_box, proposal['box']) > self.threshold: num_hit += 1 break num_proposal +=", "hit_num += 1 break recall_list.append(hit_num / ctx_num) avg_num_list.append(len(ranked_proposals)) return sum(avg_num_list)", "refexp-based hit rate. Args: refdb: `refdb` dict. 
split: Dataset split", "proposals if image_as_key: image_id = self.refer.sentToRef[exp_id]['image_id'] proposals = proposal_dict[image_id] else:", "image_id: [{box: [4,], score: float}]}. image_as_key: Use image_id instead of", "as key, default `False`. Returns: proposal_per_ref: Number of proposals per", "ref['sent_ids']: # Get proposals if image_as_key: proposals = proposal_dict[image_id] else:", "split to evaluate on. top_N: Select top-N scoring proposals to", "= proposal_dict[image_id] else: proposals = proposal_dict[exp_id] # Rank and select", "proposals. \"\"\" # Initialize counters num_hit = 0 num_proposal =", "Rank and select proposals ranked_proposals = sorted(proposals, key=lambda p: p['score'],", "hit rate. Args: refdb: `refdb` dict. split: Dataset split to", "threshold def eval_hit_rate(self, split, proposal_dict, image_as_key=False): \"\"\"Evaluate refexp-based hit rate.", "num_ref += 1 proposal_per_ref = num_proposal / num_ref hit_rate =", "class NewHitRateEvaluator: def __init__(self, refer, top_N=None, threshold=0.5): \"\"\"Evaluate refexp-based hit", "= ctxdb self.top_N = top_N self.threshold = threshold def eval_hit_rate(self,", "> self.threshold: hit_num += 1 break recall_list.append(hit_num / ctx_num) avg_num_list.append(len(ranked_proposals))", "len(ranked_proposals) num_ref += 1 proposal_per_ref = num_proposal / num_ref hit_rate", "__all__ = ['NewHitRateEvaluator', 'CtxHitRateEvaluator'] class NewHitRateEvaluator: def __init__(self, refer, top_N=None,", "hit rate of proposals. \"\"\" # Initialize counters num_hit =", "= sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N] hit_num, ctx_num = 0,", "ranked_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N] for proposal in", "self.top_N = top_N self.threshold = threshold def eval_hit_rate(self, split, proposal_dict,", "exp_id as key, default `False`. Returns: proposal_per_ref: Number of proposals", "number of refexp, not ref for ref_id in self.refer.getRefIds(split=split): ref", "NewHitRateEvaluator: def __init__(self, refer, top_N=None, threshold=0.5): \"\"\"Evaluate refexp-based hit rate.", "top_N: Select top-N scoring proposals to evaluate. `None` means no", "proposal_dict, image_as_key=False): \"\"\"Evaluate refexp-based hit rate. Args: proposal_dict: {exp_id or", "image_id instead of exp_id as key, default `False`. Returns: proposal_per_ref:", "+= 1 break recall_list.append(hit_num / ctx_num) avg_num_list.append(len(ranked_proposals)) return sum(avg_num_list) /", "if calculate_iou(ctx_box, proposal['box']) > self.threshold: hit_num += 1 break recall_list.append(hit_num", "Returns: proposal_per_ref: Number of proposals per refexp. hit_rate: Refexp-based hit", "scoring proposals to evaluate. `None` means no selection. 
Default `None`.", "= proposal_dict[exp_id] # Rank and select proposals ranked_proposals = sorted(proposals,", "if image_as_key: image_id = self.refer.sentToRef[exp_id]['image_id'] proposals = proposal_dict[image_id] else: proposals", "\"\"\" # Initialize counters num_hit = 0 num_proposal = 0", "1 break num_proposal += len(ranked_proposals) num_ref += 1 proposal_per_ref =", "hit_rate class CtxHitRateEvaluator: def __init__(self, refer, ctxdb, top_N=None, threshold=0.5): self.refer", "0 for ctx_item in ctx['ctx']: ctx_num += 1 ctx_box =", "for exp_id in ref['sent_ids']: # Get proposals if image_as_key: proposals", "ann = self.refer.Anns[ann_id] gt_box = xywh_to_xyxy(ann['bbox']) for exp_id in ref['sent_ids']:", "in ctx['ctx']: ctx_num += 1 ctx_box = ctx_item['box'] for proposal", "ctx in self.ctxdb[split].items(): exp_id = int(exp_id) if len(ctx['ctx']) == 0:", "= ref['image_id'] ann_id = ref['ann_id'] ann = self.refer.Anns[ann_id] gt_box =", "> self.threshold: num_hit += 1 break num_proposal += len(ranked_proposals) num_ref", "rate. Args: refdb: `refdb` dict. split: Dataset split to evaluate", "no selection. Default `None`. \"\"\" self.refer = refer self.top_N =", "\"\"\" # Initialize counters recall_list = [] avg_num_list = []", "[] avg_num_list = [] for exp_id, ctx in self.ctxdb[split].items(): exp_id", "calculate_iou, xywh_to_xyxy __all__ = ['NewHitRateEvaluator', 'CtxHitRateEvaluator'] class NewHitRateEvaluator: def __init__(self,", "proposals if image_as_key: proposals = proposal_dict[image_id] else: proposals = proposal_dict[exp_id]", "__init__(self, refer, top_N=None, threshold=0.5): \"\"\"Evaluate refexp-based hit rate. Args: refdb:", "def __init__(self, refer, ctxdb, top_N=None, threshold=0.5): self.refer = refer self.ctxdb", "\"\"\"Evaluate refexp-based hit rate. Args: refdb: `refdb` dict. split: Dataset", "\"\"\" self.refer = refer self.top_N = top_N self.threshold = threshold", "Default `None`. \"\"\" self.refer = refer self.top_N = top_N self.threshold", "0 # NOTE: this is the number of refexp, not", "refexp, not ref for ref_id in self.refer.getRefIds(split=split): ref = self.refer.Refs[ref_id]", "for proposal in ranked_proposals: if calculate_iou(gt_box, proposal['box']) > self.threshold: num_hit", "in ranked_proposals: if calculate_iou(gt_box, proposal['box']) > self.threshold: num_hit += 1", "if len(ctx['ctx']) == 0: continue # Get proposals if image_as_key:", "= num_proposal / num_ref hit_rate = num_hit / num_ref return", "proposal_dict[exp_id] # Rank and select proposals ranked_proposals = sorted(proposals, key=lambda", "0 num_proposal = 0 num_ref = 0 # NOTE: this", "= refer self.top_N = top_N self.threshold = threshold def eval_hit_rate(self,", "= 0 num_ref = 0 # NOTE: this is the" ]
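# --- Added illustration (not part of the original module) ---
# The core hit test both evaluators perform, on a toy example. `calculate_iou`
# is assumed to take two [x1, y1, x2, y2] boxes; a small stand-in is defined
# here so the snippet runs on its own.
def _toy_iou(a, b):
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    area = lambda r: (r[2] - r[0]) * (r[3] - r[1])
    return inter / float(area(a) + area(b) - inter)

gt_box = [0, 0, 10, 10]
ranked_proposals = [{'box': [20, 20, 30, 30], 'score': 0.9},   # IoU 0.00 -> miss
                    {'box': [1, 1, 10, 10], 'score': 0.8}]     # IoU 0.81 -> hit
hit = any(_toy_iou(gt_box, p['box']) > 0.5 for p in ranked_proposals)
print(hit)  # True: this refexp would count as a hit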
[ "else: letLog.append(logs[i]) tempLetLog=[] for i in letLog: tempLetLog.append(' '.join(i.split(' ')[1:]+[i.split('", "# -*- coding: utf-8 -*- \"\"\" Created on Sat Aug", "in letLog: tempLetLog.append(' '.join(i.split(' ')[1:]+[i.split(' ')[0]])) tempLetLog=sorted(tempLetLog) letLog=[] for i", "in range(len(logs)): temp=[] temp=logs[i].split(' ') if temp[1].isdigit() is True: digLog.append(logs[i])", "Created on Sat Aug 22 19:07:30 2020 @author: <NAME> \"\"\"", "for i in tempLetLog: tempPrime=i.split(' ')[:-1] temp=i.split(' ')[-1] letLog.append(' '.join([temp]+tempPrime))", "if temp[1].isdigit() is True: digLog.append(logs[i]) else: letLog.append(logs[i]) tempLetLog=[] for i", "logs: List[str]) -> List[str]: letLog=[] digLog=[] for i in range(len(logs)):", "letLog.append(logs[i]) tempLetLog=[] for i in letLog: tempLetLog.append(' '.join(i.split(' ')[1:]+[i.split(' ')[0]]))", "<NAME> \"\"\" class Solution: def reorderLogFiles(self, logs: List[str]) -> List[str]:", "tempLetLog=sorted(tempLetLog) letLog=[] for i in tempLetLog: tempPrime=i.split(' ')[:-1] temp=i.split(' ')[-1]", "letLog=[] for i in tempLetLog: tempPrime=i.split(' ')[:-1] temp=i.split(' ')[-1] letLog.append('", "letLog=[] digLog=[] for i in range(len(logs)): temp=[] temp=logs[i].split(' ') if", "i in letLog: tempLetLog.append(' '.join(i.split(' ')[1:]+[i.split(' ')[0]])) tempLetLog=sorted(tempLetLog) letLog=[] for", "tempLetLog.append(' '.join(i.split(' ')[1:]+[i.split(' ')[0]])) tempLetLog=sorted(tempLetLog) letLog=[] for i in tempLetLog:", "List[str]: letLog=[] digLog=[] for i in range(len(logs)): temp=[] temp=logs[i].split(' ')", "def reorderLogFiles(self, logs: List[str]) -> List[str]: letLog=[] digLog=[] for i", "class Solution: def reorderLogFiles(self, logs: List[str]) -> List[str]: letLog=[] digLog=[]", "') if temp[1].isdigit() is True: digLog.append(logs[i]) else: letLog.append(logs[i]) tempLetLog=[] for", "')[0]])) tempLetLog=sorted(tempLetLog) letLog=[] for i in tempLetLog: tempPrime=i.split(' ')[:-1] temp=i.split('", "temp[1].isdigit() is True: digLog.append(logs[i]) else: letLog.append(logs[i]) tempLetLog=[] for i in", "2020 @author: <NAME> \"\"\" class Solution: def reorderLogFiles(self, logs: List[str])", "letLog: tempLetLog.append(' '.join(i.split(' ')[1:]+[i.split(' ')[0]])) tempLetLog=sorted(tempLetLog) letLog=[] for i in", "on Sat Aug 22 19:07:30 2020 @author: <NAME> \"\"\" class", "reorderLogFiles(self, logs: List[str]) -> List[str]: letLog=[] digLog=[] for i in", "-> List[str]: letLog=[] digLog=[] for i in range(len(logs)): temp=[] temp=logs[i].split('", "-*- coding: utf-8 -*- \"\"\" Created on Sat Aug 22", "temp=logs[i].split(' ') if temp[1].isdigit() is True: digLog.append(logs[i]) else: letLog.append(logs[i]) tempLetLog=[]", "utf-8 -*- \"\"\" Created on Sat Aug 22 19:07:30 2020", "19:07:30 2020 @author: <NAME> \"\"\" class Solution: def reorderLogFiles(self, logs:", "')[1:]+[i.split(' ')[0]])) tempLetLog=sorted(tempLetLog) letLog=[] for i in tempLetLog: tempPrime=i.split(' ')[:-1]", "is True: digLog.append(logs[i]) else: letLog.append(logs[i]) tempLetLog=[] for i in letLog:", "digLog.append(logs[i]) else: letLog.append(logs[i]) tempLetLog=[] for i in letLog: tempLetLog.append(' '.join(i.split('", "Aug 22 19:07:30 2020 @author: <NAME> \"\"\" class Solution: def", "@author: <NAME> \"\"\" class Solution: def reorderLogFiles(self, logs: List[str]) ->", "-*- \"\"\" Created on Sat Aug 22 19:07:30 2020 @author:", "digLog=[] for i in range(len(logs)): temp=[] temp=logs[i].split(' ') if temp[1].isdigit()", 
"'.join(i.split(' ')[1:]+[i.split(' ')[0]])) tempLetLog=sorted(tempLetLog) letLog=[] for i in tempLetLog: tempPrime=i.split('", "Sat Aug 22 19:07:30 2020 @author: <NAME> \"\"\" class Solution:", "in tempLetLog: tempPrime=i.split(' ')[:-1] temp=i.split(' ')[-1] letLog.append(' '.join([temp]+tempPrime)) return letLog+digLog", "\"\"\" class Solution: def reorderLogFiles(self, logs: List[str]) -> List[str]: letLog=[]", "coding: utf-8 -*- \"\"\" Created on Sat Aug 22 19:07:30", "\"\"\" Created on Sat Aug 22 19:07:30 2020 @author: <NAME>", "tempLetLog=[] for i in letLog: tempLetLog.append(' '.join(i.split(' ')[1:]+[i.split(' ')[0]])) tempLetLog=sorted(tempLetLog)", "temp=[] temp=logs[i].split(' ') if temp[1].isdigit() is True: digLog.append(logs[i]) else: letLog.append(logs[i])", "i in tempLetLog: tempPrime=i.split(' ')[:-1] temp=i.split(' ')[-1] letLog.append(' '.join([temp]+tempPrime)) return", "List[str]) -> List[str]: letLog=[] digLog=[] for i in range(len(logs)): temp=[]", "Solution: def reorderLogFiles(self, logs: List[str]) -> List[str]: letLog=[] digLog=[] for", "i in range(len(logs)): temp=[] temp=logs[i].split(' ') if temp[1].isdigit() is True:", "range(len(logs)): temp=[] temp=logs[i].split(' ') if temp[1].isdigit() is True: digLog.append(logs[i]) else:", "for i in letLog: tempLetLog.append(' '.join(i.split(' ')[1:]+[i.split(' ')[0]])) tempLetLog=sorted(tempLetLog) letLog=[]", "22 19:07:30 2020 @author: <NAME> \"\"\" class Solution: def reorderLogFiles(self,", "for i in range(len(logs)): temp=[] temp=logs[i].split(' ') if temp[1].isdigit() is", "True: digLog.append(logs[i]) else: letLog.append(logs[i]) tempLetLog=[] for i in letLog: tempLetLog.append('" ]
[ "None with traced_atomic_transaction(): try: yield except DatabaseError: raise except Exception", "raise except Exception as e: error = e if error:", "an error in any occurred.\"\"\" error = None with traced_atomic_transaction():", "import DatabaseError from ..core.tracing import traced_atomic_transaction @contextmanager def transaction_with_commit_on_errors(): \"\"\"Perform", "import traced_atomic_transaction @contextmanager def transaction_with_commit_on_errors(): \"\"\"Perform transaction and raise an", "transaction and raise an error in any occurred.\"\"\" error =", "\"\"\"Perform transaction and raise an error in any occurred.\"\"\" error", "traced_atomic_transaction @contextmanager def transaction_with_commit_on_errors(): \"\"\"Perform transaction and raise an error", "contextmanager from django.db import DatabaseError from ..core.tracing import traced_atomic_transaction @contextmanager", "django.db import DatabaseError from ..core.tracing import traced_atomic_transaction @contextmanager def transaction_with_commit_on_errors():", "with traced_atomic_transaction(): try: yield except DatabaseError: raise except Exception as", "any occurred.\"\"\" error = None with traced_atomic_transaction(): try: yield except", "DatabaseError from ..core.tracing import traced_atomic_transaction @contextmanager def transaction_with_commit_on_errors(): \"\"\"Perform transaction", "from contextlib import contextmanager from django.db import DatabaseError from ..core.tracing", "transaction_with_commit_on_errors(): \"\"\"Perform transaction and raise an error in any occurred.\"\"\"", "def transaction_with_commit_on_errors(): \"\"\"Perform transaction and raise an error in any", "except DatabaseError: raise except Exception as e: error = e", "DatabaseError: raise except Exception as e: error = e if", "@contextmanager def transaction_with_commit_on_errors(): \"\"\"Perform transaction and raise an error in", "error = None with traced_atomic_transaction(): try: yield except DatabaseError: raise", "raise an error in any occurred.\"\"\" error = None with", "try: yield except DatabaseError: raise except Exception as e: error", "from ..core.tracing import traced_atomic_transaction @contextmanager def transaction_with_commit_on_errors(): \"\"\"Perform transaction and", "import contextmanager from django.db import DatabaseError from ..core.tracing import traced_atomic_transaction", "..core.tracing import traced_atomic_transaction @contextmanager def transaction_with_commit_on_errors(): \"\"\"Perform transaction and raise", "in any occurred.\"\"\" error = None with traced_atomic_transaction(): try: yield", "contextlib import contextmanager from django.db import DatabaseError from ..core.tracing import", "from django.db import DatabaseError from ..core.tracing import traced_atomic_transaction @contextmanager def", "error in any occurred.\"\"\" error = None with traced_atomic_transaction(): try:", "occurred.\"\"\" error = None with traced_atomic_transaction(): try: yield except DatabaseError:", "yield except DatabaseError: raise except Exception as e: error =", "except Exception as e: error = e if error: raise", "traced_atomic_transaction(): try: yield except DatabaseError: raise except Exception as e:", "= None with traced_atomic_transaction(): try: yield except DatabaseError: raise except", "Exception as e: error = e if error: raise error", "and raise an error in any occurred.\"\"\" error = None" ]
[ "desc\"' from_clause = '--from \"2018-04-04T00:00:00\"' to_clause = '--to \"2018-05-22T00:00:00\"' scopes", "assert len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) >= 0 if len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) > 0: assert summary[\"policyAssignments\"][0][\"policyDefinitions\"][0][\"results\"]", "= '--apply \"groupby((policyAssignmentId, resourceId), aggregate($count as numRecords))\"' select_clause = '--select", "the MIT License. See License.txt in the project root for", "states = self.cmd('az policy state list {} {} {} {}", "# -------------------------------------------------------------------------------------------- from azure.cli.testsdk import ScenarioTest, record_only @record_only() class PolicyInsightsTests(ScenarioTest):", "apply_clause, select_clause, order_by_clause, top_clause)).get_output_in_json() assert len(states) >= 0 summary =", "select_clause, order_by_clause, top_clause)).get_output_in_json() assert len(states) >= 0 summary = self.cmd('az", "assert summary[\"policyAssignments\"][0][\"results\"] is not None assert len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) >= 0 if", "'-a \"96e22f7846e94bb186ae3a01\"', '-a \"bc916e4f3ab54030822a11b3\" -g \"tipkeyvaultresourcegroup\" ' ] for scope", "is not None assert len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) >= 0 if len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) >", "= self.cmd('az policy state list {} {} {} {} {}", "top_clause)).get_output_in_json() assert len(events) >= 0 states = self.cmd('az policy state", "not None assert len(summary[\"policyAssignments\"]) >= 0 if len(summary[\"policyAssignments\"]) > 0:", "0 if len(summary[\"policyAssignments\"]) > 0: assert summary[\"policyAssignments\"][0][\"results\"] is not None", "not None assert len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) >= 0 if len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) > 0:", "order_by_clause, top_clause)).get_output_in_json() assert len(events) >= 0 states = self.cmd('az policy", "\"tipkeyvaultresourcegroup\" ' ] for scope in scopes: events = self.cmd('az", "policy state summarize {} {} {} {} {}'.format( scope, from_clause,", "\"mms-wcus\"', '-s \"335cefd2-ab16-430f-b364-974a170eb1d5\"', '-d \"25bf1e2a-6004-47ad-9bd1-2a40dd6de016\"', '-a \"96e22f7846e94bb186ae3a01\"', '-a \"bc916e4f3ab54030822a11b3\" -g", "\"vaults\" -g \"omssecurityintresourcegroup\"', '--resource \"default\" --namespace \"microsoft.network\" --resource-type \"subnets\" --parent", "eq false\"' apply_clause = '--apply \"groupby((policyAssignmentId, resourceId), aggregate($count as numRecords))\"'", "'-a \"bc916e4f3ab54030822a11b3\" -g \"tipkeyvaultresourcegroup\" ' ] for scope in scopes:", "if len(summary[\"policyAssignments\"]) > 0: assert summary[\"policyAssignments\"][0][\"results\"] is not None assert", "from_clause, to_clause, filter_clause, top_clause)).get_output_in_json() assert summary[\"results\"] is not None assert", "See License.txt in the project root for license information. 
#", "in scopes: events = self.cmd('az policy event list {} {}", "'--top 2' filter_clause = '--filter \"isCompliant eq false\"' apply_clause =", "None assert len(summary[\"policyAssignments\"]) >= 0 if len(summary[\"policyAssignments\"]) > 0: assert", "\"omssecuritydevkeyvalut\" --namespace \"microsoft.keyvault\" --resource-type \"vaults\" -g \"omssecurityintresourcegroup\"', '--resource \"default\" --namespace", "--namespace \"microsoft.network\" --resource-type \"subnets\" --parent \"virtualnetworks/mms-wcus-vnet\" -g \"mms-wcus\"', '-s \"335cefd2-ab16-430f-b364-974a170eb1d5\"',", "summary[\"results\"] is not None assert len(summary[\"policyAssignments\"]) >= 0 if len(summary[\"policyAssignments\"])", "order_by_clause, top_clause)).get_output_in_json() assert len(states) >= 0 summary = self.cmd('az policy", "'--to \"2018-05-22T00:00:00\"' scopes = [ '-m \"azgovtest4\"', '', '-g \"defaultresourcegroup-eus\"',", "<reponame>diberry/azure-cli # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights", "\"azgovtest4\"', '', '-g \"defaultresourcegroup-eus\"', '--resource \"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/eastusnsggroup/providers/microsoft.network/networksecuritygroups/eastusnsg/securityrules/allow-joba\"', '--resource \"omssecuritydevkeyvalut\" --namespace \"microsoft.keyvault\"", "--namespace \"microsoft.keyvault\" --resource-type \"vaults\" -g \"omssecurityintresourcegroup\"', '--resource \"default\" --namespace \"microsoft.network\"", "\"groupby((policyAssignmentId, resourceId), aggregate($count as numRecords))\"' select_clause = '--select \"policyAssignmentId, resourceId,", "> 0: assert summary[\"policyAssignments\"][0][\"results\"] is not None assert len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) >=", "information. 
# -------------------------------------------------------------------------------------------- from azure.cli.testsdk import ScenarioTest, record_only @record_only() class", "'-d \"25bf1e2a-6004-47ad-9bd1-2a40dd6de016\"', '-a \"96e22f7846e94bb186ae3a01\"', '-a \"bc916e4f3ab54030822a11b3\" -g \"tipkeyvaultresourcegroup\" ' ]", "to_clause, filter_clause, apply_clause, select_clause, order_by_clause, top_clause)).get_output_in_json() assert len(events) >= 0", "policy state list {} {} {} {} {} {} {}", "list {} {} {} {} {} {} {} {}'.format( scope,", "' ] for scope in scopes: events = self.cmd('az policy", "'--from \"2018-04-04T00:00:00\"' to_clause = '--to \"2018-05-22T00:00:00\"' scopes = [ '-m", "\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/eastusnsggroup/providers/microsoft.network/networksecuritygroups/eastusnsg/securityrules/allow-joba\"', '--resource \"omssecuritydevkeyvalut\" --namespace \"microsoft.keyvault\" --resource-type \"vaults\" -g \"omssecurityintresourcegroup\"', '--resource", "select_clause = '--select \"policyAssignmentId, resourceId, numRecords\"' order_by_clause = '--order-by \"numRecords", "'--resource \"default\" --namespace \"microsoft.network\" --resource-type \"subnets\" --parent \"virtualnetworks/mms-wcus-vnet\" -g \"mms-wcus\"',", "resourceId), aggregate($count as numRecords))\"' select_clause = '--select \"policyAssignmentId, resourceId, numRecords\"'", "event list {} {} {} {} {} {} {} {}'.format(", "\"microsoft.keyvault\" --resource-type \"vaults\" -g \"omssecurityintresourcegroup\"', '--resource \"default\" --namespace \"microsoft.network\" --resource-type", "import ScenarioTest, record_only @record_only() class PolicyInsightsTests(ScenarioTest): def test_policy_insights(self): top_clause =", "{}'.format( scope, from_clause, to_clause, filter_clause, top_clause)).get_output_in_json() assert summary[\"results\"] is not", "events = self.cmd('az policy event list {} {} {} {}", "# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed", "assert len(summary[\"policyAssignments\"]) >= 0 if len(summary[\"policyAssignments\"]) > 0: assert summary[\"policyAssignments\"][0][\"results\"]", "for license information. # -------------------------------------------------------------------------------------------- from azure.cli.testsdk import ScenarioTest, record_only", "= '--top 2' filter_clause = '--filter \"isCompliant eq false\"' apply_clause", "{} {} {} {}'.format( scope, from_clause, to_clause, filter_clause, apply_clause, select_clause,", "MIT License. 
See License.txt in the project root for license", "-g \"mms-wcus\"', '-s \"335cefd2-ab16-430f-b364-974a170eb1d5\"', '-d \"25bf1e2a-6004-47ad-9bd1-2a40dd6de016\"', '-a \"96e22f7846e94bb186ae3a01\"', '-a \"bc916e4f3ab54030822a11b3\"", "as numRecords))\"' select_clause = '--select \"policyAssignmentId, resourceId, numRecords\"' order_by_clause =", "scope, from_clause, to_clause, filter_clause, apply_clause, select_clause, order_by_clause, top_clause)).get_output_in_json() assert len(states)", "= [ '-m \"azgovtest4\"', '', '-g \"defaultresourcegroup-eus\"', '--resource \"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/eastusnsggroup/providers/microsoft.network/networksecuritygroups/eastusnsg/securityrules/allow-joba\"', '--resource", "\"default\" --namespace \"microsoft.network\" --resource-type \"subnets\" --parent \"virtualnetworks/mms-wcus-vnet\" -g \"mms-wcus\"', '-s", "len(summary[\"policyAssignments\"]) > 0: assert summary[\"policyAssignments\"][0][\"results\"] is not None assert len(summary[\"policyAssignments\"][0][\"policyDefinitions\"])", "len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) >= 0 if len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) > 0: assert summary[\"policyAssignments\"][0][\"policyDefinitions\"][0][\"results\"] is", "reserved. # Licensed under the MIT License. See License.txt in", "--resource-type \"vaults\" -g \"omssecurityintresourcegroup\"', '--resource \"default\" --namespace \"microsoft.network\" --resource-type \"subnets\"", "{}'.format( scope, from_clause, to_clause, filter_clause, apply_clause, select_clause, order_by_clause, top_clause)).get_output_in_json() assert", "assert len(events) >= 0 states = self.cmd('az policy state list", "to_clause = '--to \"2018-05-22T00:00:00\"' scopes = [ '-m \"azgovtest4\"', '',", "{} {} {}'.format( scope, from_clause, to_clause, filter_clause, top_clause)).get_output_in_json() assert summary[\"results\"]", "top_clause = '--top 2' filter_clause = '--filter \"isCompliant eq false\"'", "filter_clause, apply_clause, select_clause, order_by_clause, top_clause)).get_output_in_json() assert len(states) >= 0 summary", "filter_clause = '--filter \"isCompliant eq false\"' apply_clause = '--apply \"groupby((policyAssignmentId,", ">= 0 summary = self.cmd('az policy state summarize {} {}", "top_clause)).get_output_in_json() assert summary[\"results\"] is not None assert len(summary[\"policyAssignments\"]) >= 0", "policy event list {} {} {} {} {} {} {}", "state summarize {} {} {} {} {}'.format( scope, from_clause, to_clause,", "self.cmd('az policy state list {} {} {} {} {} {}", "the project root for license information. # -------------------------------------------------------------------------------------------- from azure.cli.testsdk", "project root for license information. # -------------------------------------------------------------------------------------------- from azure.cli.testsdk import", "'-m \"azgovtest4\"', '', '-g \"defaultresourcegroup-eus\"', '--resource \"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/eastusnsggroup/providers/microsoft.network/networksecuritygroups/eastusnsg/securityrules/allow-joba\"', '--resource \"omssecuritydevkeyvalut\" --namespace", "Corporation. All rights reserved. # Licensed under the MIT License.", "# Licensed under the MIT License. See License.txt in the", "-------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
#", "\"335cefd2-ab16-430f-b364-974a170eb1d5\"', '-d \"25bf1e2a-6004-47ad-9bd1-2a40dd6de016\"', '-a \"96e22f7846e94bb186ae3a01\"', '-a \"bc916e4f3ab54030822a11b3\" -g \"tipkeyvaultresourcegroup\" '", "= self.cmd('az policy event list {} {} {} {} {}", "0 if len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) > 0: assert summary[\"policyAssignments\"][0][\"policyDefinitions\"][0][\"results\"] is not None", "filter_clause, top_clause)).get_output_in_json() assert summary[\"results\"] is not None assert len(summary[\"policyAssignments\"]) >=", "\"2018-04-04T00:00:00\"' to_clause = '--to \"2018-05-22T00:00:00\"' scopes = [ '-m \"azgovtest4\"',", "scope, from_clause, to_clause, filter_clause, apply_clause, select_clause, order_by_clause, top_clause)).get_output_in_json() assert len(events)", "License.txt in the project root for license information. # --------------------------------------------------------------------------------------------", "in the project root for license information. # -------------------------------------------------------------------------------------------- from", "select_clause, order_by_clause, top_clause)).get_output_in_json() assert len(events) >= 0 states = self.cmd('az", "self.cmd('az policy event list {} {} {} {} {} {}", "from azure.cli.testsdk import ScenarioTest, record_only @record_only() class PolicyInsightsTests(ScenarioTest): def test_policy_insights(self):", "{} {} {} {} {}'.format( scope, from_clause, to_clause, filter_clause, apply_clause,", "len(events) >= 0 states = self.cmd('az policy state list {}", "\"virtualnetworks/mms-wcus-vnet\" -g \"mms-wcus\"', '-s \"335cefd2-ab16-430f-b364-974a170eb1d5\"', '-d \"25bf1e2a-6004-47ad-9bd1-2a40dd6de016\"', '-a \"96e22f7846e94bb186ae3a01\"', '-a", "License. See License.txt in the project root for license information.", "'-s \"335cefd2-ab16-430f-b364-974a170eb1d5\"', '-d \"25bf1e2a-6004-47ad-9bd1-2a40dd6de016\"', '-a \"96e22f7846e94bb186ae3a01\"', '-a \"bc916e4f3ab54030822a11b3\" -g \"tipkeyvaultresourcegroup\"", "# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. 
All rights reserved.", "{} {} {}'.format( scope, from_clause, to_clause, filter_clause, apply_clause, select_clause, order_by_clause,", "= '--from \"2018-04-04T00:00:00\"' to_clause = '--to \"2018-05-22T00:00:00\"' scopes = [", "'--resource \"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/eastusnsggroup/providers/microsoft.network/networksecuritygroups/eastusnsg/securityrules/allow-joba\"', '--resource \"omssecuritydevkeyvalut\" --namespace \"microsoft.keyvault\" --resource-type \"vaults\" -g \"omssecurityintresourcegroup\"',", "order_by_clause = '--order-by \"numRecords desc\"' from_clause = '--from \"2018-04-04T00:00:00\"' to_clause", "--parent \"virtualnetworks/mms-wcus-vnet\" -g \"mms-wcus\"', '-s \"335cefd2-ab16-430f-b364-974a170eb1d5\"', '-d \"25bf1e2a-6004-47ad-9bd1-2a40dd6de016\"', '-a \"96e22f7846e94bb186ae3a01\"',", "-g \"tipkeyvaultresourcegroup\" ' ] for scope in scopes: events =", "len(states) >= 0 summary = self.cmd('az policy state summarize {}", "ScenarioTest, record_only @record_only() class PolicyInsightsTests(ScenarioTest): def test_policy_insights(self): top_clause = '--top", "scopes: events = self.cmd('az policy event list {} {} {}", "state list {} {} {} {} {} {} {} {}'.format(", "apply_clause = '--apply \"groupby((policyAssignmentId, resourceId), aggregate($count as numRecords))\"' select_clause =", "to_clause, filter_clause, apply_clause, select_clause, order_by_clause, top_clause)).get_output_in_json() assert len(states) >= 0", "assert len(states) >= 0 summary = self.cmd('az policy state summarize", "azure.cli.testsdk import ScenarioTest, record_only @record_only() class PolicyInsightsTests(ScenarioTest): def test_policy_insights(self): top_clause", "scope, from_clause, to_clause, filter_clause, top_clause)).get_output_in_json() assert summary[\"results\"] is not None", "\"microsoft.network\" --resource-type \"subnets\" --parent \"virtualnetworks/mms-wcus-vnet\" -g \"mms-wcus\"', '-s \"335cefd2-ab16-430f-b364-974a170eb1d5\"', '-d", "None assert len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) >= 0 if len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) > 0: assert", "{} {} {} {} {} {} {} {}'.format( scope, from_clause,", "from_clause, to_clause, filter_clause, apply_clause, select_clause, order_by_clause, top_clause)).get_output_in_json() assert len(states) >=", "] for scope in scopes: events = self.cmd('az policy event", "from_clause, to_clause, filter_clause, apply_clause, select_clause, order_by_clause, top_clause)).get_output_in_json() assert len(events) >=", "scope in scopes: events = self.cmd('az policy event list {}", "from_clause = '--from \"2018-04-04T00:00:00\"' to_clause = '--to \"2018-05-22T00:00:00\"' scopes =", "record_only @record_only() class PolicyInsightsTests(ScenarioTest): def test_policy_insights(self): top_clause = '--top 2'", "under the MIT License. 
See License.txt in the project root", "= '--to \"2018-05-22T00:00:00\"' scopes = [ '-m \"azgovtest4\"', '', '-g", "{} {}'.format( scope, from_clause, to_clause, filter_clause, top_clause)).get_output_in_json() assert summary[\"results\"] is", "'--select \"policyAssignmentId, resourceId, numRecords\"' order_by_clause = '--order-by \"numRecords desc\"' from_clause", "'--apply \"groupby((policyAssignmentId, resourceId), aggregate($count as numRecords))\"' select_clause = '--select \"policyAssignmentId,", "= '--order-by \"numRecords desc\"' from_clause = '--from \"2018-04-04T00:00:00\"' to_clause =", "\"numRecords desc\"' from_clause = '--from \"2018-04-04T00:00:00\"' to_clause = '--to \"2018-05-22T00:00:00\"'", "(c) Microsoft Corporation. All rights reserved. # Licensed under the", "All rights reserved. # Licensed under the MIT License. See", "\"omssecurityintresourcegroup\"', '--resource \"default\" --namespace \"microsoft.network\" --resource-type \"subnets\" --parent \"virtualnetworks/mms-wcus-vnet\" -g", ">= 0 if len(summary[\"policyAssignments\"]) > 0: assert summary[\"policyAssignments\"][0][\"results\"] is not", "{} {} {} {} {} {}'.format( scope, from_clause, to_clause, filter_clause,", "numRecords\"' order_by_clause = '--order-by \"numRecords desc\"' from_clause = '--from \"2018-04-04T00:00:00\"'", "\"25bf1e2a-6004-47ad-9bd1-2a40dd6de016\"', '-a \"96e22f7846e94bb186ae3a01\"', '-a \"bc916e4f3ab54030822a11b3\" -g \"tipkeyvaultresourcegroup\" ' ] for", "\"subnets\" --parent \"virtualnetworks/mms-wcus-vnet\" -g \"mms-wcus\"', '-s \"335cefd2-ab16-430f-b364-974a170eb1d5\"', '-d \"25bf1e2a-6004-47ad-9bd1-2a40dd6de016\"', '-a", "= '--filter \"isCompliant eq false\"' apply_clause = '--apply \"groupby((policyAssignmentId, resourceId),", "'', '-g \"defaultresourcegroup-eus\"', '--resource \"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/eastusnsggroup/providers/microsoft.network/networksecuritygroups/eastusnsg/securityrules/allow-joba\"', '--resource \"omssecuritydevkeyvalut\" --namespace \"microsoft.keyvault\" --resource-type", "\"2018-05-22T00:00:00\"' scopes = [ '-m \"azgovtest4\"', '', '-g \"defaultresourcegroup-eus\"', '--resource", "scopes = [ '-m \"azgovtest4\"', '', '-g \"defaultresourcegroup-eus\"', '--resource \"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/eastusnsggroup/providers/microsoft.network/networksecuritygroups/eastusnsg/securityrules/allow-joba\"',", "-------------------------------------------------------------------------------------------- from azure.cli.testsdk import ScenarioTest, record_only @record_only() class PolicyInsightsTests(ScenarioTest): def", "[ '-m \"azgovtest4\"', '', '-g \"defaultresourcegroup-eus\"', '--resource \"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/eastusnsggroup/providers/microsoft.network/networksecuritygroups/eastusnsg/securityrules/allow-joba\"', '--resource \"omssecuritydevkeyvalut\"", "2' filter_clause = '--filter \"isCompliant eq false\"' apply_clause = '--apply", "{} {} {} {} {} {} {}'.format( scope, from_clause, to_clause,", "0 states = self.cmd('az policy state list {} {} {}", "0 summary = self.cmd('az policy state summarize {} {} {}", ">= 0 states = self.cmd('az policy state list {} {}", "\"96e22f7846e94bb186ae3a01\"', '-a \"bc916e4f3ab54030822a11b3\" -g \"tipkeyvaultresourcegroup\" ' ] for scope in", "false\"' apply_clause = '--apply \"groupby((policyAssignmentId, resourceId), aggregate($count as numRecords))\"' select_clause", "resourceId, numRecords\"' order_by_clause = 
'--order-by \"numRecords desc\"' from_clause = '--from", "is not None assert len(summary[\"policyAssignments\"]) >= 0 if len(summary[\"policyAssignments\"]) >", ">= 0 if len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) > 0: assert summary[\"policyAssignments\"][0][\"policyDefinitions\"][0][\"results\"] is not", "'--filter \"isCompliant eq false\"' apply_clause = '--apply \"groupby((policyAssignmentId, resourceId), aggregate($count", "Microsoft Corporation. All rights reserved. # Licensed under the MIT", "Licensed under the MIT License. See License.txt in the project", "def test_policy_insights(self): top_clause = '--top 2' filter_clause = '--filter \"isCompliant", "-g \"omssecurityintresourcegroup\"', '--resource \"default\" --namespace \"microsoft.network\" --resource-type \"subnets\" --parent \"virtualnetworks/mms-wcus-vnet\"", "for scope in scopes: events = self.cmd('az policy event list", "summarize {} {} {} {} {}'.format( scope, from_clause, to_clause, filter_clause,", "{} {} {} {} {}'.format( scope, from_clause, to_clause, filter_clause, top_clause)).get_output_in_json()", "'--order-by \"numRecords desc\"' from_clause = '--from \"2018-04-04T00:00:00\"' to_clause = '--to", "0: assert summary[\"policyAssignments\"][0][\"results\"] is not None assert len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) >= 0", "class PolicyInsightsTests(ScenarioTest): def test_policy_insights(self): top_clause = '--top 2' filter_clause =", "\"isCompliant eq false\"' apply_clause = '--apply \"groupby((policyAssignmentId, resourceId), aggregate($count as", "'-g \"defaultresourcegroup-eus\"', '--resource \"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/eastusnsggroup/providers/microsoft.network/networksecuritygroups/eastusnsg/securityrules/allow-joba\"', '--resource \"omssecuritydevkeyvalut\" --namespace \"microsoft.keyvault\" --resource-type \"vaults\"", "summary = self.cmd('az policy state summarize {} {} {} {}", "rights reserved. # Licensed under the MIT License. 
See License.txt", "aggregate($count as numRecords))\"' select_clause = '--select \"policyAssignmentId, resourceId, numRecords\"' order_by_clause", "'--resource \"omssecuritydevkeyvalut\" --namespace \"microsoft.keyvault\" --resource-type \"vaults\" -g \"omssecurityintresourcegroup\"', '--resource \"default\"", "\"defaultresourcegroup-eus\"', '--resource \"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/eastusnsggroup/providers/microsoft.network/networksecuritygroups/eastusnsg/securityrules/allow-joba\"', '--resource \"omssecuritydevkeyvalut\" --namespace \"microsoft.keyvault\" --resource-type \"vaults\" -g", "PolicyInsightsTests(ScenarioTest): def test_policy_insights(self): top_clause = '--top 2' filter_clause = '--filter", "{} {}'.format( scope, from_clause, to_clause, filter_clause, apply_clause, select_clause, order_by_clause, top_clause)).get_output_in_json()", "\"bc916e4f3ab54030822a11b3\" -g \"tipkeyvaultresourcegroup\" ' ] for scope in scopes: events", "top_clause)).get_output_in_json() assert len(states) >= 0 summary = self.cmd('az policy state", "filter_clause, apply_clause, select_clause, order_by_clause, top_clause)).get_output_in_json() assert len(events) >= 0 states", "= '--select \"policyAssignmentId, resourceId, numRecords\"' order_by_clause = '--order-by \"numRecords desc\"'", "= self.cmd('az policy state summarize {} {} {} {} {}'.format(", "{} {} {} {}'.format( scope, from_clause, to_clause, filter_clause, top_clause)).get_output_in_json() assert", "assert summary[\"results\"] is not None assert len(summary[\"policyAssignments\"]) >= 0 if", "numRecords))\"' select_clause = '--select \"policyAssignmentId, resourceId, numRecords\"' order_by_clause = '--order-by", "summary[\"policyAssignments\"][0][\"results\"] is not None assert len(summary[\"policyAssignments\"][0][\"policyDefinitions\"]) >= 0 if len(summary[\"policyAssignments\"][0][\"policyDefinitions\"])", "@record_only() class PolicyInsightsTests(ScenarioTest): def test_policy_insights(self): top_clause = '--top 2' filter_clause", "--resource-type \"subnets\" --parent \"virtualnetworks/mms-wcus-vnet\" -g \"mms-wcus\"', '-s \"335cefd2-ab16-430f-b364-974a170eb1d5\"', '-d \"25bf1e2a-6004-47ad-9bd1-2a40dd6de016\"',", "license information. # -------------------------------------------------------------------------------------------- from azure.cli.testsdk import ScenarioTest, record_only @record_only()", "apply_clause, select_clause, order_by_clause, top_clause)).get_output_in_json() assert len(events) >= 0 states =", "test_policy_insights(self): top_clause = '--top 2' filter_clause = '--filter \"isCompliant eq", "self.cmd('az policy state summarize {} {} {} {} {}'.format( scope,", "root for license information. # -------------------------------------------------------------------------------------------- from azure.cli.testsdk import ScenarioTest,", "Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under", "to_clause, filter_clause, top_clause)).get_output_in_json() assert summary[\"results\"] is not None assert len(summary[\"policyAssignments\"])", "\"policyAssignmentId, resourceId, numRecords\"' order_by_clause = '--order-by \"numRecords desc\"' from_clause =", "len(summary[\"policyAssignments\"]) >= 0 if len(summary[\"policyAssignments\"]) > 0: assert summary[\"policyAssignments\"][0][\"results\"] is" ]
[ "<reponame>Aslic/rmats_turbo_4.1.0 import glob import os.path import subprocess import sys import", "self._out_dir = os.path.join(self._test_dir, 'out') self._prep_1_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_1') self._prep_2_tmp_dir =", "self._prep_2_tmp_dir, self._post_tmp_dir) def _check_results_inte_1_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with", "error = output_parser.parse_from_gtf( from_gtf_se_path) self.assertFalse(error) self.assertEqual(len(from_gtf_se_rows), 1) from_gtf_se_row = from_gtf_se_rows[0]", "[[1, 100], [401, 425]], self._read_length) self.assertFalse(error) rep_2_bam.reads = [rep_2_read_1, rep_2_read_2]", "elif self._sub_step == 'missing_input_bam': self._setup_miss_input_bam() elif self._sub_step == 'missing_prep_bam': self._setup_miss_prep_bam()", "elif self._sub_step == 'missing_prep_bam': self._setup_miss_prep_bam() def _setup_dup_input_bam(self): self._dup_input_bam_path = os.path.join(self._generated_input_dir,", "'junction_pairs': [[1, 1], [100, 200], [299, 299]], 'count': 1 }]", "'missing_input_bam': arguments.extend([ '--tmp', self._miss_input_bam_tmp_dir, '--b1', self._miss_input_bam_path, '--task', 'post', '--statoff', ])", "= glob.glob(os.path.join(tmp_dir, '*.rmats')) # filenames begin with a timestamp used", "files with no associated prep output') def _check_results_inte_1_pass(self): self._check_no_error_results() def", "not in input but associated with prep output') def _check_results_inte_2_pass(self):", "'junction_pairs': [[201, 201], [300, 400], [499, 499]], 'count': 1 }]", "tests.bam.set_read_pair_from_intervals(rep_2_read_1, rep_2_read_2, [[26, 100]], [[1, 100], [401, 425]], self._read_length) self.assertFalse(error)", "self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--task', 'inte', '--statoff', ]) elif self._sub_step ==", "os.path.join(self._test_dir, 'tmp_miss_input_bam') self._miss_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_prep_bam') tests.util.recreate_dirs([ self._generated_input_dir, self._out_dir, self._prep_1_tmp_dir,", "elif self._sub_step == 'inte_1_fail': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2',", "50 self._sample_1_bams_path = os.path.join(self._generated_input_dir, 'b1.txt') self._sample_2_bams_path = os.path.join(self._generated_input_dir, 'b2.txt') sample_1_bam_replicate_template", "self._miss_input_bam_tmp_dir) def _setup_miss_prep_bam(self): self._miss_prep_bam_path = os.path.join(self._generated_input_dir, 'miss_prep.txt') bams = self._sample_1_bams", "# chromosome length rep_1_read_1.template_name = tests.util.template_name_str([1, 1]) rep_1_read_2 = tests.bam.Read()", "= tests.bam.BAM() rep_1_bam.path = sample_1_replicate_template.format(1) rep_2_bam = tests.bam.BAM() rep_2_bam.path =", "] command.extend(source_paths) subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) def _check_results(self): if self._sub_step", "}]) self._cp_with_prefix('prep_2_', self._prep_2_tmp_dir, self._post_tmp_dir) def _check_results_inte_1_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name =", "inc_level_1_splits = se_mats_jc_row['IncLevel1'].split(',') self.assertEqual(len(inc_level_1_splits), 2) self.assertAlmostEqual(float(inc_level_1_splits[0]), 1) self.assertAlmostEqual(float(inc_level_1_splits[1]), 1) inc_level_2_splits", "'--b1', self._sample_1_bams_path, '--b2', self._sample_2_bams_path, '--task', 'inte', ]) elif 
self._sub_step ==", "'prep_post') self._generated_input_dir = os.path.join(self._test_dir, 'generated_input') self._out_dir = os.path.join(self._test_dir, 'out') self._prep_1_tmp_dir", "sub_step self._setup_sub_step() self._run_test() def _command_output_dir(self): return os.path.join(self._test_dir, 'command_output') def _rmats_arguments(self):", "self._post_tmp_dir, self._dup_input_bam_tmp_dir, self._dup_prep_bam_tmp_dir, self._miss_input_bam_tmp_dir, self._miss_prep_bam_tmp_dir, self._command_output_dir() ]) self._read_type = 'paired'", "}] }]) multis = dot_rmats_contents['multis'] if dot_rmats_i == 0: self.assertEqual(multis,", "self._sub_step == 'prep_2': arguments.extend([ '--tmp', self._prep_2_tmp_dir, '--b1', self._sample_2_bams_path, '--task', 'prep',", "self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._dup_input_bam_tmp_dir) def _setup_dup_prep_bam(self): self._dup_prep_bam_path = os.path.join(self._generated_input_dir, 'dup_prep.txt') bams", "'--task', 'post', '--statoff', ]) return arguments def _setup_sub_step(self): if self._sub_step", "elif self._sub_step == 'post': self._check_results_post() elif self._sub_step == 'duplicate_input_bam': self._check_results_dup_input_bam()", "== 'inte_2_fail': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_2_bams_path, '--task', 'inte', '--statoff',", "'prep_1': self._check_results_prep_1() elif self._sub_step == 'inte_1_fail': self._check_results_inte_1_fail() elif self._sub_step ==", "tests.bam.Read() error = tests.bam.set_read_pair_from_intervals( rep_2_read_1, rep_2_read_2, [[26, 100]], [[201, 300],", "0] }] }]) else: self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box': [1,", "= tests.bam.set_read_pair_from_intervals(rep_2_read_1, rep_2_read_2, [[26, 100]], [[1, 100], [401, 425]], self._read_length)", "= '1' transcript_1.strand = '+' transcript_1.gene_id = tests.util.gene_id_str(1) transcript_1.gene_name =", "os.path.join(self._generated_input_dir, 'b2.txt') sample_1_bam_replicate_template = os.path.join( self._generated_input_dir, 'sample_1_rep_{}.bam') sample_2_bam_replicate_template = os.path.join(", "rep_2_read_1.template_name = tests.util.template_name_str([2, 2]) rep_2_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_2_read_1,", "self._dup_input_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_dup_input_bam') self._dup_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_dup_prep_bam') self._miss_input_bam_tmp_dir =", "== 'inte_1_pass': self._check_results_inte_1_pass() elif self._sub_step == 'prep_2': self._check_results_prep_2() elif self._sub_step", "for bam in self._sample_1_bams: dup_bam_path = bam.path expected_error = '{}", "source_paths = self._get_dot_rmats_paths(source_dir) command = [ sys.executable, tests.test_config.CP_WITH_PREFIX, prefix, dest_dir", "self._sub_step == 'duplicate_input_bam': self._setup_dup_input_bam() elif self._sub_step == 'duplicate_prep_bam': self._setup_dup_prep_bam() elif", "return os.path.join(self._test_dir, 'command_output') def _rmats_arguments(self): arguments = [ '--gtf', self._gtf_path,", "tests.bam.BAM() rep_2_bam.path = sample_1_replicate_template.format(2) sample_1_bams = [rep_1_bam, rep_2_bam] rep_1_read_1 =", "tests.bam.Read() rep_1_read_1.ref_seq_name = '1' # chromosome rep_1_read_1.ref_seq_len = 1000 #", "elif self._sub_step == 'missing_input_bam': arguments.extend([ '--tmp', self._miss_input_bam_tmp_dir, '--b1', self._miss_input_bam_path, '--task',", "]) elif self._sub_step == 'inte_1_fail': arguments.extend([ 
'--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path,", "'prep', ]) elif self._sub_step == 'inte_1_fail': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1',", "self.assertEqual(jc_raw_se_row['SJC_SAMPLE_2'], '1,1') se_mats_jc_path = os.path.join(self._out_dir, 'SE.MATS.JC.txt') se_mats_jc_header, se_mats_jc_rows, error =", "+ self._sample_2_bams self._write_bams(bams, self._miss_prep_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_prep_bam_tmp_dir) def _create_gtf(self, gtf_path):", "self._sub_step == 'duplicate_prep_bam': self._check_results_dup_prep_bam() elif self._sub_step == 'missing_input_bam': self._check_results_miss_input_bam() elif", "tests.bam.BAM() rep_1_bam.path = sample_1_replicate_template.format(1) rep_2_bam = tests.bam.BAM() rep_2_bam.path = sample_1_replicate_template.format(2)", "self._read_length) novel_juncs = dot_rmats_contents['novel_juncs'] self.assertEqual(novel_juncs, [{quoted_test_gene_id: [[0, 0, 2]]}]) exons", "[201, 300]], [[401, 475]], self._read_length) self.assertFalse(error) rep_1_bam.reads = [rep_1_read_1, rep_1_read_2]", "'tmp_prep_2') self._post_tmp_dir = os.path.join(self._test_dir, 'tmp_post') self._dup_input_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_dup_input_bam') self._dup_prep_bam_tmp_dir", "prep output') def _check_results_inte_2_pass(self): self._check_no_error_results() def _check_results_post(self): self._check_no_error_results() command_stdout_file_name =", "self._generated_input_dir = os.path.join(self._test_dir, 'generated_input') self._out_dir = os.path.join(self._test_dir, 'out') self._prep_1_tmp_dir =", "def test(self): for sub_step in self._sub_steps: self._sub_step = sub_step self._setup_sub_step()", "'--b1', self._sample_2_bams_path, '--task', 'prep', ]) elif self._sub_step == 'inte_2_fail': arguments.extend([", "'--task', 'post', ]) elif self._sub_step == 'duplicate_input_bam': arguments.extend([ '--tmp', self._dup_input_bam_tmp_dir,", "= tests.util.template_name_str([1, 1]) rep_1_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_1_read_1, rep_1_read_2,", "'end_box': [401, 499], 'counts': [1, 0] }] }]) else: self.assertEqual(exons,", "rep_1_read_1.template_name = tests.util.template_name_str([1, 1]) rep_1_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_1_read_1,", "[(1, 100), (201, 300), (401, 500)] gtf.transcripts = [transcript_1] error", "1) from_gtf_se_row = from_gtf_se_rows[0] self.assertEqual(from_gtf_se_row['GeneID'], tests.util.double_quote(tests.util.gene_id_str(1))) self.assertEqual(from_gtf_se_row['exonStart_0base'], '200') self.assertEqual(from_gtf_se_row['exonEnd'], '300')", "se_mats_jc_rows, error = output_parser.parse_mats_jc( se_mats_jc_path) self.assertFalse(error) self._check_se_mats_jc_header(se_mats_jc_header) self.assertEqual(len(se_mats_jc_rows), 1) se_mats_jc_row", "= err_f_h.readlines() dup_bam_path = self._sample_1_bams[0].path expected_error = '{} given 2", "= tests.util.transcript_id_str(1) transcript_1.exons = [(1, 100), (201, 300), (401, 500)]", "tests.util.assert_some_line_has(self, err_lines, expected_error) def _check_results_dup_prep_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name()", "import subprocess import sys import unittest import tests.bam import tests.base_test", "= float(se_mats_jc_row['FDR']) tests.util.assert_within_bounds(self, fdr, 0, 1) inc_level_1_splits = se_mats_jc_row['IncLevel1'].split(',') 
self.assertEqual(len(inc_level_1_splits),", "rep_2_read_1 = tests.bam.Read() rep_2_read_1.ref_seq_name = '1' # chromosome rep_2_read_1.ref_seq_len =", "self.assertEqual(dot_rmats_contents['bams'], [self._sample_1_bams[dot_rmats_i].path]) self.assertEqual(dot_rmats_contents['read_length'], self._read_length) novel_juncs = dot_rmats_contents['novel_juncs'] self.assertEqual(novel_juncs, [dict()]) exons", "def _check_results_post(self): self._check_no_error_results() command_stdout_file_name = self._get_stdout_file_name() with open(command_stdout_file_name, 'rt') as", "import os.path import subprocess import sys import unittest import tests.bam", "tests.util class Test(tests.base_test.BaseTest): def setUp(self): super().setUp() self._test_base_dir = tests.test_config.TEST_BASE_DIR self._test_dir", "'300') jc_raw_se_path = os.path.join(self._out_dir, 'JC.raw.input.SE.txt') jc_raw_se_header, jc_raw_se_rows, error = output_parser.parse_jc_raw(", "transcript_1.chromosome = '1' transcript_1.strand = '+' transcript_1.gene_id = tests.util.gene_id_str(1) transcript_1.gene_name", "tests.bam import tests.base_test import tests.gtf import tests.output_parser as output_parser import", "'inte_1_pass', 'prep_2', 'inte_2_fail', 'inte_2_pass', 'post', 'duplicate_input_bam', 'duplicate_prep_bam', 'missing_input_bam', 'missing_prep_bam', ]", "]) elif self._sub_step == 'missing_prep_bam': arguments.extend([ '--tmp', self._miss_prep_bam_tmp_dir, '--b1', self._miss_prep_bam_path,", "[401, 499], 'counts': [1, 0] }] }]) else: self.assertEqual(exons, [{", "output') def _check_results_inte_1_pass(self): self._check_no_error_results() def _check_results_inte_2_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name =", "_command_output_dir(self): return os.path.join(self._test_dir, 'command_output') def _rmats_arguments(self): arguments = [ '--gtf',", "self._sample_2_bams_path, '--task', 'prep', ]) elif self._sub_step == 'inte_2_fail': arguments.extend([ '--tmp',", "[{ quoted_test_gene_id: [{ 'junction_pairs': [[1, 1], [100, 400], [499, 499]],", "self._sub_step == 'missing_input_bam': self._check_results_miss_input_bam() elif self._sub_step == 'missing_prep_bam': self._check_results_miss_prep_bam() else:", "self._gtf = self._create_gtf(self._gtf_path) self._sub_steps = [ 'prep_1', 'inte_1_fail', 'inte_1_pass', 'prep_2',", "super().setUp() self._test_base_dir = tests.test_config.TEST_BASE_DIR self._test_dir = os.path.join(self._test_base_dir, 'prep_post') self._generated_input_dir =", "chromosome rep_2_read_1.ref_seq_len = 1000 # chromosome length rep_2_read_1.template_name = tests.util.template_name_str([2,", "400], [499, 499]], 'count': 1 }] }]) else: self.assertEqual(multis, [{", "out_lines, 'Processing count files') from_gtf_se_path = os.path.join(self._out_dir, 'fromGTF.SE.txt') from_gtf_se_header, from_gtf_se_rows,", "from_gtf_se_rows, error = output_parser.parse_from_gtf( from_gtf_se_path) self.assertFalse(error) self.assertEqual(len(from_gtf_se_rows), 1) from_gtf_se_row =", "= '1' # chromosome rep_1_read_1.ref_seq_len = 1000 # chromosome length", "[[201, 300], [401, 425]], self._read_length) self.assertFalse(error) rep_2_bam.reads = [rep_2_read_1, rep_2_read_2]", "from_gtf_se_path = os.path.join(self._out_dir, 'fromGTF.SE.txt') from_gtf_se_header, from_gtf_se_rows, error = output_parser.parse_from_gtf( from_gtf_se_path)", "'inte', '--statoff', ]) elif self._sub_step == 'inte_2_pass': arguments.extend([ '--tmp', self._post_tmp_dir,", "sub_step in self._sub_steps: self._sub_step = 
sub_step self._setup_sub_step() self._run_test() def _command_output_dir(self):", "rep_2_bam.reads = [rep_2_read_1, rep_2_read_2] self._write_bams(sample_2_bams, sample_2_bams_path) return sample_2_bams def _cp_with_prefix(self,", "self._prep_1_tmp_dir, self._miss_prep_bam_tmp_dir) def _create_gtf(self, gtf_path): gtf = tests.gtf.GTF() gtf.path =", "99], 'end_box': [1, 99], 'counts': [1, 0] }] }]) multis", "self._miss_prep_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_prep_bam_tmp_dir) def _create_gtf(self, gtf_path): gtf = tests.gtf.GTF()", "# chromosome length rep_1_read_1.template_name = tests.util.template_name_str([2, 1]) rep_1_read_2 = tests.bam.Read()", "os.path.join(self._generated_input_dir, 'b1.txt') self._sample_2_bams_path = os.path.join(self._generated_input_dir, 'b2.txt') sample_1_bam_replicate_template = os.path.join( self._generated_input_dir,", "'--b2', self._sample_2_bams_path, '--task', 'post', ]) elif self._sub_step == 'duplicate_input_bam': arguments.extend([", "self._check_results_inte_1_fail() elif self._sub_step == 'inte_1_pass': self._check_results_inte_1_pass() elif self._sub_step == 'prep_2':", "0: self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box': [401, 499], 'end_box': [401,", "'fromGTF.SE.txt') from_gtf_se_header, from_gtf_se_rows, error = output_parser.parse_from_gtf( from_gtf_se_path) self.assertFalse(error) self.assertEqual(len(from_gtf_se_rows), 1)", "}]) else: self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box': [1, 99], 'end_box':", "[transcript_1] error = gtf.write() self.assertFalse(error) return gtf def _create_sample_1_bams(self, sample_1_bams_path,", "associated prep output') def _check_results_inte_1_pass(self): self._check_no_error_results() def _check_results_inte_2_fail(self): self.assertNotEqual(self._rmats_return_code, 0)", "os.path.join(self._test_dir, 'tmp_prep_2') self._post_tmp_dir = os.path.join(self._test_dir, 'tmp_post') self._dup_input_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_dup_input_bam')", "in self._sub_steps: self._sub_step = sub_step self._setup_sub_step() self._run_test() def _command_output_dir(self): return", "subprocess import sys import unittest import tests.bam import tests.base_test import", "self._get_stdout_file_name() with open(command_stdout_file_name, 'rt') as out_f_h: out_lines = out_f_h.readlines() tests.util.assert_no_line_has(self,", "def _check_results(self): if self._sub_step == 'prep_1': self._check_results_prep_1() elif self._sub_step ==", "'--b1', self._dup_prep_bam_path, '--task', 'post', '--statoff', ]) elif self._sub_step == 'missing_input_bam':", "chromosome length rep_2_read_1.template_name = tests.util.template_name_str([1, 2]) rep_2_read_2 = tests.bam.Read() error", "= '1' # chromosome rep_2_read_1.ref_seq_len = 1000 # chromosome length", "self._check_results_prep_2() elif self._sub_step == 'inte_2_fail': self._check_results_inte_2_fail() elif self._sub_step == 'inte_2_pass':", "self._sub_step == 'duplicate_prep_bam': arguments.extend([ '--tmp', self._dup_prep_bam_tmp_dir, '--b1', self._dup_prep_bam_path, '--task', 'post',", "'1' # chromosome rep_1_read_1.ref_seq_len = 1000 # chromosome length rep_1_read_1.template_name", "gtf_path transcript_1 = tests.gtf.Transcript() transcript_1.chromosome = '1' transcript_1.strand = '+'", "quoted_test_gene_id: [{ 'start_box': [1, 99], 'end_box': [1, 99], 'counts': [1,", "}]) multis = dot_rmats_contents['multis'] if dot_rmats_i == 0: self.assertEqual(multis, [{", "'inte', ]) elif self._sub_step == 
'inte_1_pass': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1',", "elif self._sub_step == 'duplicate_prep_bam': arguments.extend([ '--tmp', self._dup_prep_bam_tmp_dir, '--b1', self._dup_prep_bam_path, '--task',", "elif self._sub_step == 'missing_input_bam': self._check_results_miss_input_bam() elif self._sub_step == 'missing_prep_bam': self._check_results_miss_prep_bam()", "os.path.join(self._test_dir, 'out') self._prep_1_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_1') self._prep_2_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_2')", "= tests.util.template_name_str([2, 1]) rep_1_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_1_read_1, rep_1_read_2,", "def _check_results_prep_2(self): self._check_no_error_results() command_stdout_file_name = self._get_stdout_file_name() with open(command_stdout_file_name, 'rt') as", "tests.util.template_name_str([2, 2]) rep_2_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_2_read_1, rep_2_read_2, [[26,", "= os.path.join(self._test_dir, 'tmp_miss_prep_bam') tests.util.recreate_dirs([ self._generated_input_dir, self._out_dir, self._prep_1_tmp_dir, self._prep_2_tmp_dir, self._post_tmp_dir, self._dup_input_bam_tmp_dir,", "[self._sample_1_bams[0]] self._write_bams(bams, self._dup_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._dup_input_bam_tmp_dir) def _setup_dup_prep_bam(self): self._dup_prep_bam_path =", "self._dup_prep_bam_tmp_dir) self._cp_with_prefix('prep_1_again', self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir) def _setup_miss_input_bam(self): self._miss_input_bam_path = os.path.join(self._generated_input_dir, 'miss_input.txt')", "'dup_input.txt') bams = self._sample_1_bams + [self._sample_1_bams[0]] self._write_bams(bams, self._dup_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir,", "'inte_1_fail': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2', self._sample_2_bams_path, '--task', 'inte',", "self._write_bams(bams, self._miss_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_input_bam_tmp_dir) def _setup_miss_prep_bam(self): self._miss_prep_bam_path = os.path.join(self._generated_input_dir,", "1000 # chromosome length rep_1_read_1.template_name = tests.util.template_name_str([2, 1]) rep_1_read_2 =", "os.path.join( self._generated_input_dir, 'sample_1_rep_{}.bam') sample_2_bam_replicate_template = os.path.join( self._generated_input_dir, 'sample_2_rep_{}.bam') self._sample_1_bams =", "== 'missing_prep_bam': self._check_results_miss_prep_bam() else: self.fail('unexpected sub_step: {}'.format(self._sub_step)) def _get_dot_rmats_paths(self, tmp_dir):", "dot_rmats_paths[dot_rmats_i]) self.assertFalse(error) self.assertEqual(dot_rmats_contents['bams'], [self._sample_1_bams[dot_rmats_i].path]) self.assertEqual(dot_rmats_contents['read_length'], self._read_length) novel_juncs = dot_rmats_contents['novel_juncs'] self.assertEqual(novel_juncs,", "self.assertAlmostEqual(float(inc_level_1_splits[1]), 1) inc_level_2_splits = se_mats_jc_row['IncLevel2'].split(',') self.assertEqual(len(inc_level_2_splits), 2) self.assertAlmostEqual(float(inc_level_2_splits[0]), 0) self.assertAlmostEqual(float(inc_level_2_splits[1]),", "tests.util.assert_some_line_has( self, err_lines, 'bam files not in input but associated", "os.path.join(self._out_dir, 'JC.raw.input.SE.txt') jc_raw_se_header, jc_raw_se_rows, error = output_parser.parse_jc_raw( jc_raw_se_path) self.assertFalse(error) 
self.assertEqual(len(jc_raw_se_rows),", "= se_mats_jc_row['IncLevel2'].split(',') self.assertEqual(len(inc_level_2_splits), 2) self.assertAlmostEqual(float(inc_level_2_splits[0]), 0) self.assertAlmostEqual(float(inc_level_2_splits[1]), 0) self.assertAlmostEqual(float(se_mats_jc_row['IncLevelDifference']), 1)", "1 }] }]) else: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[201,", "self.assertEqual(novel_juncs, [{quoted_test_gene_id: [[0, 0, 2]]}]) exons = dot_rmats_contents['exons'] if dot_rmats_i", "'count': 1 }] }]) self._cp_with_prefix('prep_2_', self._prep_2_tmp_dir, self._post_tmp_dir) def _check_results_inte_1_fail(self): self.assertNotEqual(self._rmats_return_code,", "'0,0') self.assertEqual(jc_raw_se_row['IJC_SAMPLE_2'], '0,0') self.assertEqual(jc_raw_se_row['SJC_SAMPLE_2'], '1,1') se_mats_jc_path = os.path.join(self._out_dir, 'SE.MATS.JC.txt') se_mats_jc_header,", "[1, 0] }] }]) else: self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box':", "= [rep_2_read_1, rep_2_read_2] self._write_bams(sample_2_bams, sample_2_bams_path) return sample_2_bams def _cp_with_prefix(self, prefix,", "import tests.bam import tests.base_test import tests.gtf import tests.output_parser as output_parser", "'post': self._check_results_post() elif self._sub_step == 'duplicate_input_bam': self._check_results_dup_input_bam() elif self._sub_step ==", "0: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[1, 1], [100, 400],", "]) elif self._sub_step == 'duplicate_input_bam': arguments.extend([ '--tmp', self._dup_input_bam_tmp_dir, '--b1', self._dup_input_bam_path,", "== 'duplicate_prep_bam': self._setup_dup_prep_bam() elif self._sub_step == 'missing_input_bam': self._setup_miss_input_bam() elif self._sub_step", "open(command_stdout_file_name, 'rt') as out_f_h: out_lines = out_f_h.readlines() tests.util.assert_no_line_has(self, out_lines, 'Processing", "200], [299, 299]], 'count': 1 }] }]) else: self.assertEqual(multis, [{", "1) se_mats_jc_row = se_mats_jc_rows[0] pvalue = float(se_mats_jc_row['PValue']) tests.util.assert_within_bounds(self, pvalue, 0,", "check=True) def _check_results(self): if self._sub_step == 'prep_1': self._check_results_prep_1() elif self._sub_step", "arguments.extend([ '--tmp', self._miss_input_bam_tmp_dir, '--b1', self._miss_input_bam_path, '--task', 'post', '--statoff', ]) elif", "tests.util.gene_id_str(1) quoted_test_gene_id = tests.util.double_quote(test_gene_id) dot_rmats_paths = self._get_dot_rmats_paths(self._prep_2_tmp_dir) self.assertEqual(len(dot_rmats_paths), 2) for", "_check_results_miss_prep_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as", "expected_error = '{} not found in .rmats'.format(miss_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error)", "[{ quoted_test_gene_id: [{ 'start_box': [1, 99], 'end_box': [1, 99], 'counts':", "command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as err_f_h: err_lines =", "'--b1', self._sample_1_bams_path, '--task', 'inte', '--statoff', ]) elif self._sub_step == 'prep_2':", "bams = [self._sample_1_bams[0]] self._write_bams(bams, self._miss_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_input_bam_tmp_dir) def _setup_miss_prep_bam(self):", "self._sub_step == 'post': self._check_results_post() elif self._sub_step == 'duplicate_input_bam': self._check_results_dup_input_bam() elif", 
"se_mats_jc_rows[0] pvalue = float(se_mats_jc_row['PValue']) tests.util.assert_within_bounds(self, pvalue, 0, 1) fdr =", "open(command_stdout_file_name, 'rt') as out_f_h: out_lines = out_f_h.readlines() tests.util.assert_some_line_has(self, out_lines, 'Processing", "tests.gtf import tests.output_parser as output_parser import tests.test_config import tests.util class", "self._sample_2_bams_path, '--task', 'inte', '--statoff', ]) elif self._sub_step == 'inte_2_pass': arguments.extend([", "self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as err_f_h:", "self._miss_input_bam_path, '--task', 'post', '--statoff', ]) elif self._sub_step == 'missing_prep_bam': arguments.extend([", "'start_box': [1, 99], 'end_box': [1, 99], 'counts': [1, 0] }]", "elif self._sub_step == 'inte_2_pass': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2',", "tests.test_config import tests.util class Test(tests.base_test.BaseTest): def setUp(self): super().setUp() self._test_base_dir =", "'inte_1_fail', 'inte_1_pass', 'prep_2', 'inte_2_fail', 'inte_2_pass', 'post', 'duplicate_input_bam', 'duplicate_prep_bam', 'missing_input_bam', 'missing_prep_bam',", "self._dup_input_bam_path, '--task', 'post', '--statoff', ]) elif self._sub_step == 'duplicate_prep_bam': arguments.extend([", "None def test(self): for sub_step in self._sub_steps: self._sub_step = sub_step", "rep_2_read_1.ref_seq_name = '1' # chromosome rep_2_read_1.ref_seq_len = 1000 # chromosome", "def _check_results_inte_2_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt')", "rep_2_read_1.template_name = tests.util.template_name_str([1, 2]) rep_2_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(", "self._check_results_inte_2_pass() elif self._sub_step == 'post': self._check_results_post() elif self._sub_step == 'duplicate_input_bam':", "= float(se_mats_jc_row['PValue']) tests.util.assert_within_bounds(self, pvalue, 0, 1) fdr = float(se_mats_jc_row['FDR']) tests.util.assert_within_bounds(self,", "self._create_sample_1_bams( self._sample_1_bams_path, sample_1_bam_replicate_template) self._sample_2_bams = self._create_sample_2_bams( self._sample_2_bams_path, sample_2_bam_replicate_template) self._gtf_path =", "os.path.join(self._test_dir, 'generated_input') self._out_dir = os.path.join(self._test_dir, 'out') self._prep_1_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_1')", "gtf.write() self.assertFalse(error) return gtf def _create_sample_1_bams(self, sample_1_bams_path, sample_1_replicate_template): rep_1_bam =", "_create_sample_1_bams(self, sample_1_bams_path, sample_1_replicate_template): rep_1_bam = tests.bam.BAM() rep_1_bam.path = sample_1_replicate_template.format(1) rep_2_bam", "self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as err_f_h: err_lines = err_f_h.readlines() tests.util.assert_some_line_has(", "_setup_dup_prep_bam(self): self._dup_prep_bam_path = os.path.join(self._generated_input_dir, 'dup_prep.txt') bams = self._sample_1_bams self._write_bams(bams, self._dup_prep_bam_path)", "'out') self._prep_1_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_1') self._prep_2_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_2') self._post_tmp_dir", "'--tmp', self._miss_prep_bam_tmp_dir, '--b1', self._miss_prep_bam_path, '--task', 'post', '--statoff', ]) return arguments", "err_lines 
= err_f_h.readlines() tests.util.assert_some_line_has( self, err_lines, 'input bam files with", "err_lines, expected_error) def _check_results_dup_prep_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with", "= os.path.join(self._test_dir, 'tmp_miss_input_bam') self._miss_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_prep_bam') tests.util.recreate_dirs([ self._generated_input_dir, self._out_dir,", "self.assertAlmostEqual(float(inc_level_1_splits[0]), 1) self.assertAlmostEqual(float(inc_level_1_splits[1]), 1) inc_level_2_splits = se_mats_jc_row['IncLevel2'].split(',') self.assertEqual(len(inc_level_2_splits), 2) self.assertAlmostEqual(float(inc_level_2_splits[0]),", "[{ 'junction_pairs': [[1, 1], [100, 400], [499, 499]], 'count': 1", "quoted_test_gene_id: [{ 'start_box': [401, 499], 'end_box': [401, 499], 'counts': [1,", "self._sample_2_bams: miss_bam_path = bam.path expected_error = '{} not found in", "self.assertFalse(error) return gtf def _create_sample_1_bams(self, sample_1_bams_path, sample_1_replicate_template): rep_1_bam = tests.bam.BAM()", "= [rep_1_read_1, rep_1_read_2] rep_2_read_1 = tests.bam.Read() rep_2_read_1.ref_seq_name = '1' #", "tests.bam.set_read_pair_from_intervals(rep_1_read_1, rep_1_read_2, [[76, 100], [201, 300]], [[401, 475]], self._read_length) self.assertFalse(error)", "'duplicate_input_bam': self._setup_dup_input_bam() elif self._sub_step == 'duplicate_prep_bam': self._setup_dup_prep_bam() elif self._sub_step ==", "= os.path.join(self._generated_input_dir, 'dup_prep.txt') bams = self._sample_1_bams self._write_bams(bams, self._dup_prep_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir,", "source_dir, dest_dir): source_paths = self._get_dot_rmats_paths(source_dir) command = [ sys.executable, tests.test_config.CP_WITH_PREFIX,", "= [ sys.executable, tests.test_config.CP_WITH_PREFIX, prefix, dest_dir ] command.extend(source_paths) subprocess.run(command, stdout=subprocess.PIPE,", "count files') from_gtf_se_path = os.path.join(self._out_dir, 'fromGTF.SE.txt') from_gtf_se_header, from_gtf_se_rows, error =", "400], [499, 499]], 'count': 1 }] }]) self._cp_with_prefix('prep_2_', self._prep_2_tmp_dir, self._post_tmp_dir)", "tests.output_parser as output_parser import tests.test_config import tests.util class Test(tests.base_test.BaseTest): def", "length rep_2_read_1.template_name = tests.util.template_name_str([2, 2]) rep_2_read_2 = tests.bam.Read() error =", "else: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[201, 201], [300, 400],", "'--task', 'prep', ]) elif self._sub_step == 'inte_1_fail': arguments.extend([ '--tmp', self._post_tmp_dir,", "_setup_miss_input_bam(self): self._miss_input_bam_path = os.path.join(self._generated_input_dir, 'miss_input.txt') bams = [self._sample_1_bams[0]] self._write_bams(bams, self._miss_input_bam_path)", "arguments.extend([ '--tmp', self._dup_input_bam_tmp_dir, '--b1', self._dup_input_bam_path, '--task', 'post', '--statoff', ]) elif", "with open(command_stderr_file_name, 'rt') as err_f_h: err_lines = err_f_h.readlines() for bam", "self._check_results_inte_2_fail() elif self._sub_step == 'inte_2_pass': self._check_results_inte_2_pass() elif self._sub_step == 'post':", "_check_results_post(self): self._check_no_error_results() command_stdout_file_name = self._get_stdout_file_name() with open(command_stdout_file_name, 'rt') as out_f_h:", "= os.path.join(self._generated_input_dir, 'b2.txt') sample_1_bam_replicate_template = 
os.path.join( self._generated_input_dir, 'sample_1_rep_{}.bam') sample_2_bam_replicate_template =", "self._miss_prep_bam_path = os.path.join(self._generated_input_dir, 'miss_prep.txt') bams = self._sample_1_bams + self._sample_2_bams self._write_bams(bams,", "sys import unittest import tests.bam import tests.base_test import tests.gtf import", "import sys import unittest import tests.bam import tests.base_test import tests.gtf", "arguments.extend([ '--tmp', self._prep_1_tmp_dir, '--b1', self._sample_1_bams_path, '--task', 'prep', ]) elif self._sub_step", "self._cp_with_prefix('prep_2_', self._prep_2_tmp_dir, self._post_tmp_dir) def _check_results_inte_1_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name()", "0: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[1, 1], [100, 200],", "'prep_2': self._check_results_prep_2() elif self._sub_step == 'inte_2_fail': self._check_results_inte_2_fail() elif self._sub_step ==", "err_f_h.readlines() tests.util.assert_some_line_has( self, err_lines, 'bam files not in input but", "Test(tests.base_test.BaseTest): def setUp(self): super().setUp() self._test_base_dir = tests.test_config.TEST_BASE_DIR self._test_dir = os.path.join(self._test_base_dir,", "_rmats_arguments(self): arguments = [ '--gtf', self._gtf_path, '--od', self._out_dir, '-t', self._read_type,", "length rep_1_read_1.template_name = tests.util.template_name_str([2, 1]) rep_1_read_2 = tests.bam.Read() error =", "def _setup_dup_prep_bam(self): self._dup_prep_bam_path = os.path.join(self._generated_input_dir, 'dup_prep.txt') bams = self._sample_1_bams self._write_bams(bams,", "== 'duplicate_input_bam': self._setup_dup_input_bam() elif self._sub_step == 'duplicate_prep_bam': self._setup_dup_prep_bam() elif self._sub_step", "error = output_parser.parse_jc_raw( jc_raw_se_path) self.assertFalse(error) self.assertEqual(len(jc_raw_se_rows), 1) jc_raw_se_row = jc_raw_se_rows[0]", "rep_2_read_2] self._write_bams(sample_1_bams, sample_1_bams_path) return sample_1_bams def _create_sample_2_bams(self, sample_2_bams_path, sample_2_replicate_template): rep_1_bam", "tests.util.transcript_id_str(1) transcript_1.exons = [(1, 100), (201, 300), (401, 500)] gtf.transcripts", "'missing_prep_bam': self._setup_miss_prep_bam() def _setup_dup_input_bam(self): self._dup_input_bam_path = os.path.join(self._generated_input_dir, 'dup_input.txt') bams =", "self._post_tmp_dir) def _check_results_prep_2(self): self._check_no_error_results() command_stdout_file_name = self._get_stdout_file_name() with open(command_stdout_file_name, 'rt')", "err_lines = err_f_h.readlines() for bam in self._sample_1_bams: dup_bam_path = bam.path", "}]) else: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[1, 1], [100,", "499]], 'count': 1 }] }]) self._cp_with_prefix('prep_2_', self._prep_2_tmp_dir, self._post_tmp_dir) def _check_results_inte_1_fail(self):", "= [(1, 100), (201, 300), (401, 500)] gtf.transcripts = [transcript_1]", "self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir) self._cp_with_prefix('prep_1_again', self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir) def _setup_miss_input_bam(self): self._miss_input_bam_path = os.path.join(self._generated_input_dir,", "'tmp_dup_input_bam') self._dup_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_dup_prep_bam') self._miss_input_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_input_bam') self._miss_prep_bam_tmp_dir", "tests.util.assert_within_bounds(self, fdr, 0, 1) 
inc_level_1_splits = se_mats_jc_row['IncLevel1'].split(',') self.assertEqual(len(inc_level_1_splits), 2) self.assertAlmostEqual(float(inc_level_1_splits[0]),", "as err_f_h: err_lines = err_f_h.readlines() dup_bam_path = self._sample_1_bams[0].path expected_error =", "[{ quoted_test_gene_id: [{ 'junction_pairs': [[1, 1], [100, 200], [299, 299]],", "length rep_1_read_1.template_name = tests.util.template_name_str([1, 1]) rep_1_read_2 = tests.bam.Read() error =", "'--tmp', self._post_tmp_dir, '--b1', self._sample_2_bams_path, '--task', 'inte', '--statoff', ]) elif self._sub_step", "'sample_1_rep_{}.bam') sample_2_bam_replicate_template = os.path.join( self._generated_input_dir, 'sample_2_rep_{}.bam') self._sample_1_bams = self._create_sample_1_bams( self._sample_1_bams_path,", "self._sub_step == 'inte_1_pass': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--task', 'inte',", "se_mats_jc_row['IncLevel2'].split(',') self.assertEqual(len(inc_level_2_splits), 2) self.assertAlmostEqual(float(inc_level_2_splits[0]), 0) self.assertAlmostEqual(float(inc_level_2_splits[1]), 0) self.assertAlmostEqual(float(se_mats_jc_row['IncLevelDifference']), 1) def", "'-t', self._read_type, '--readLength', str(self._read_length), ] if self._sub_step == 'prep_1': arguments.extend([", "'post': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2', self._sample_2_bams_path, '--task', 'post',", "quoted_test_gene_id = tests.util.double_quote(test_gene_id) dot_rmats_paths = self._get_dot_rmats_paths(self._prep_1_tmp_dir) self.assertEqual(len(dot_rmats_paths), 2) for dot_rmats_i", "'--readLength', str(self._read_length), ] if self._sub_step == 'prep_1': arguments.extend([ '--tmp', self._prep_1_tmp_dir,", "os.path.join(self._out_dir, 'fromGTF.SE.txt') from_gtf_se_header, from_gtf_se_rows, error = output_parser.parse_from_gtf( from_gtf_se_path) self.assertFalse(error) self.assertEqual(len(from_gtf_se_rows),", "= os.path.join(self._out_dir, 'SE.MATS.JC.txt') se_mats_jc_header, se_mats_jc_rows, error = output_parser.parse_mats_jc( se_mats_jc_path) self.assertFalse(error)", "_check_results_dup_prep_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as", "in .rmats'.format(miss_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error) if __name__ == '__main__': unittest.main(verbosity=2)", "'200') self.assertEqual(from_gtf_se_row['exonEnd'], '300') jc_raw_se_path = os.path.join(self._out_dir, 'JC.raw.input.SE.txt') jc_raw_se_header, jc_raw_se_rows, error", "self._prep_1_tmp_dir, '--b1', self._sample_1_bams_path, '--task', 'prep', ]) elif self._sub_step == 'inte_1_fail':", "= tests.bam.set_read_pair_from_intervals(rep_1_read_1, rep_1_read_2, [[76, 100], [401, 500]], [[401, 475]], self._read_length)", "'--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2', self._sample_2_bams_path, '--task', 'post', ]) elif", "== 'inte_1_pass': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--task', 'inte', '--statoff',", "self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2', self._sample_2_bams_path, '--task', 'post', ]) elif self._sub_step", "self._sample_1_bams + self._sample_2_bams self._write_bams(bams, self._miss_prep_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_prep_bam_tmp_dir) def _create_gtf(self,", "[401, 500]], [[401, 475]], self._read_length) 
self.assertFalse(error) rep_1_bam.reads = [rep_1_read_1, rep_1_read_2]", "se_mats_jc_path) self.assertFalse(error) self._check_se_mats_jc_header(se_mats_jc_header) self.assertEqual(len(se_mats_jc_rows), 1) se_mats_jc_row = se_mats_jc_rows[0] pvalue =", "= os.path.join( self._generated_input_dir, 'sample_2_rep_{}.bam') self._sample_1_bams = self._create_sample_1_bams( self._sample_1_bams_path, sample_1_bam_replicate_template) self._sample_2_bams", "self._miss_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_input_bam_tmp_dir) def _setup_miss_prep_bam(self): self._miss_prep_bam_path = os.path.join(self._generated_input_dir, 'miss_prep.txt')", "= self._get_dot_rmats_paths(self._prep_1_tmp_dir) self.assertEqual(len(dot_rmats_paths), 2) for dot_rmats_i in range(2): dot_rmats_contents, error", "'inte', ]) elif self._sub_step == 'post': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1',", "'--b1', self._miss_input_bam_path, '--task', 'post', '--statoff', ]) elif self._sub_step == 'missing_prep_bam':", "}] }]) self._cp_with_prefix('prep_2_', self._prep_2_tmp_dir, self._post_tmp_dir) def _check_results_inte_1_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name", "'{} found 2 times in .rmats'.format(dup_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error) def", "'duplicate_input_bam': arguments.extend([ '--tmp', self._dup_input_bam_tmp_dir, '--b1', self._dup_input_bam_path, '--task', 'post', '--statoff', ])", "== 'missing_prep_bam': arguments.extend([ '--tmp', self._miss_prep_bam_tmp_dir, '--b1', self._miss_prep_bam_path, '--task', 'post', '--statoff',", "sample_1_bams_path) return sample_1_bams def _create_sample_2_bams(self, sample_2_bams_path, sample_2_replicate_template): rep_1_bam = tests.bam.BAM()", "return sample_2_bams def _cp_with_prefix(self, prefix, source_dir, dest_dir): source_paths = self._get_dot_rmats_paths(source_dir)", "sample_1_replicate_template): rep_1_bam = tests.bam.BAM() rep_1_bam.path = sample_1_replicate_template.format(1) rep_2_bam = tests.bam.BAM()", "'{} given 2 times'.format(dup_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error) def _check_results_dup_prep_bam(self): self.assertNotEqual(self._rmats_return_code,", "self._check_results_dup_input_bam() elif self._sub_step == 'duplicate_prep_bam': self._check_results_dup_prep_bam() elif self._sub_step == 'missing_input_bam':", "out_f_h: out_lines = out_f_h.readlines() tests.util.assert_some_line_has(self, out_lines, 'Processing count files') from_gtf_se_path", "elif self._sub_step == 'inte_1_pass': self._check_results_inte_1_pass() elif self._sub_step == 'prep_2': self._check_results_prep_2()", "'prep_2': arguments.extend([ '--tmp', self._prep_2_tmp_dir, '--b1', self._sample_2_bams_path, '--task', 'prep', ]) elif", "[{ quoted_test_gene_id: [{ 'start_box': [401, 499], 'end_box': [401, 499], 'counts':", "as err_f_h: err_lines = err_f_h.readlines() for bam in self._sample_1_bams: dup_bam_path", "prep output') def _check_results_inte_1_pass(self): self._check_no_error_results() def _check_results_inte_2_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name", "1) fdr = float(se_mats_jc_row['FDR']) tests.util.assert_within_bounds(self, fdr, 0, 1) inc_level_1_splits =", "'inte_2_pass', 'post', 'duplicate_input_bam', 'duplicate_prep_bam', 'missing_input_bam', 'missing_prep_bam', ] self._sub_step = None", "length rep_2_read_1.template_name = tests.util.template_name_str([1, 2]) 
    def test(self):
        for sub_step in self._sub_steps:
            self._sub_step = sub_step
            self._setup_sub_step()
            self._run_test()

    def _command_output_dir(self):
        return os.path.join(self._test_dir, 'command_output')

    def _rmats_arguments(self):
        arguments = [
            '--gtf', self._gtf_path,
            '--od', self._out_dir,
            '-t', self._read_type,
            '--readLength', str(self._read_length),
        ]
        if self._sub_step == 'prep_1':
            arguments.extend([
                '--tmp', self._prep_1_tmp_dir,
                '--b1', self._sample_1_bams_path,
                '--task', 'prep',
            ])
        elif self._sub_step == 'inte_1_fail':
            arguments.extend([
                '--tmp', self._post_tmp_dir,
                '--b1', self._sample_1_bams_path,
                '--b2', self._sample_2_bams_path,
                '--task', 'inte',
            ])
        elif self._sub_step == 'inte_1_pass':
            arguments.extend([
                '--tmp', self._post_tmp_dir,
                '--b1', self._sample_1_bams_path,
                '--task', 'inte',
                '--statoff',
            ])
        elif self._sub_step == 'prep_2':
            arguments.extend([
                '--tmp', self._prep_2_tmp_dir,
                '--b1', self._sample_2_bams_path,
                '--task', 'prep',
            ])
        elif self._sub_step == 'inte_2_fail':
            arguments.extend([
                '--tmp', self._post_tmp_dir,
                '--b1', self._sample_2_bams_path,
                '--task', 'inte',
                '--statoff',
            ])
        elif self._sub_step == 'inte_2_pass':
            arguments.extend([
                '--tmp', self._post_tmp_dir,
                '--b1', self._sample_1_bams_path,
                '--b2', self._sample_2_bams_path,
                '--task', 'inte',
            ])
        elif self._sub_step == 'post':
            arguments.extend([
                '--tmp', self._post_tmp_dir,
                '--b1', self._sample_1_bams_path,
                '--b2', self._sample_2_bams_path,
                '--task', 'post',
            ])
        elif self._sub_step == 'duplicate_input_bam':
            arguments.extend([
                '--tmp', self._dup_input_bam_tmp_dir,
                '--b1', self._dup_input_bam_path,
                '--task', 'post',
                '--statoff',
            ])
        elif self._sub_step == 'duplicate_prep_bam':
            arguments.extend([
                '--tmp', self._dup_prep_bam_tmp_dir,
                '--b1', self._dup_prep_bam_path,
                '--task', 'post',
                '--statoff',
            ])
        elif self._sub_step == 'missing_input_bam':
            arguments.extend([
                '--tmp', self._miss_input_bam_tmp_dir,
                '--b1', self._miss_input_bam_path,
                '--task', 'post',
                '--statoff',
            ])
        elif self._sub_step == 'missing_prep_bam':
            arguments.extend([
                '--tmp', self._miss_prep_bam_tmp_dir,
                '--b1', self._miss_prep_bam_path,
                '--task', 'post',
                '--statoff',
            ])

        return arguments
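    # Only the failure-scenario sub-steps need extra input: each writes its
    # own BAM list and seeds its tmp dir with copies of the prep_1 output
    # (copied twice, under different prefixes, for the duplicate .rmats
    # case).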
    def _setup_sub_step(self):
        if self._sub_step == 'duplicate_input_bam':
            self._setup_dup_input_bam()
        elif self._sub_step == 'duplicate_prep_bam':
            self._setup_dup_prep_bam()
        elif self._sub_step == 'missing_input_bam':
            self._setup_miss_input_bam()
        elif self._sub_step == 'missing_prep_bam':
            self._setup_miss_prep_bam()

    def _setup_dup_input_bam(self):
        self._dup_input_bam_path = os.path.join(self._generated_input_dir,
                                                'dup_input.txt')
        bams = self._sample_1_bams + [self._sample_1_bams[0]]
        self._write_bams(bams, self._dup_input_bam_path)
        self._cp_with_prefix('prep_1', self._prep_1_tmp_dir,
                             self._dup_input_bam_tmp_dir)

    def _setup_dup_prep_bam(self):
        self._dup_prep_bam_path = os.path.join(self._generated_input_dir,
                                               'dup_prep.txt')
        bams = self._sample_1_bams
        self._write_bams(bams, self._dup_prep_bam_path)
        self._cp_with_prefix('prep_1', self._prep_1_tmp_dir,
                             self._dup_prep_bam_tmp_dir)
        self._cp_with_prefix('prep_1_again', self._prep_1_tmp_dir,
                             self._dup_prep_bam_tmp_dir)

    def _setup_miss_input_bam(self):
        self._miss_input_bam_path = os.path.join(self._generated_input_dir,
                                                 'miss_input.txt')
        bams = [self._sample_1_bams[0]]
        self._write_bams(bams, self._miss_input_bam_path)
        self._cp_with_prefix('prep_1', self._prep_1_tmp_dir,
                             self._miss_input_bam_tmp_dir)

    def _setup_miss_prep_bam(self):
        self._miss_prep_bam_path = os.path.join(self._generated_input_dir,
                                                'miss_prep.txt')
        bams = self._sample_1_bams + self._sample_2_bams
        self._write_bams(bams, self._miss_prep_bam_path)
        self._cp_with_prefix('prep_1', self._prep_1_tmp_dir,
                             self._miss_prep_bam_tmp_dir)
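    # The generated annotation is a single transcript with three exons:
    # (1, 100), (201, 300), (401, 500). The middle exon is the skipped
    # exon of the one expected SE (skipped exon) event.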
    def _create_gtf(self, gtf_path):
        gtf = tests.gtf.GTF()
        gtf.path = gtf_path
        transcript_1 = tests.gtf.Transcript()
        transcript_1.chromosome = '1'
        transcript_1.strand = '+'
        transcript_1.gene_id = tests.util.gene_id_str(1)
        transcript_1.gene_name = tests.util.gene_name_str(1)
        transcript_1.transcript_id = tests.util.transcript_id_str(1)
        transcript_1.exons = [(1, 100), (201, 300), (401, 500)]
        gtf.transcripts = [transcript_1]
        error = gtf.write()
        self.assertFalse(error)
        return gtf
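    # Sample 1 reads support inclusion of the middle exon (junction reads
    # for exon1-exon2 and exon2-exon3), while sample 2 reads skip it
    # (junction reads for exon1-exon3). _check_results_post() therefore
    # expects IncLevel1 == 1,1 and IncLevel2 == 0,0.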
    def _create_sample_1_bams(self, sample_1_bams_path,
                              sample_1_replicate_template):
        rep_1_bam = tests.bam.BAM()
        rep_1_bam.path = sample_1_replicate_template.format(1)
        rep_2_bam = tests.bam.BAM()
        rep_2_bam.path = sample_1_replicate_template.format(2)
        sample_1_bams = [rep_1_bam, rep_2_bam]

        rep_1_read_1 = tests.bam.Read()
        rep_1_read_1.ref_seq_name = '1'  # chromosome
        rep_1_read_1.ref_seq_len = 1000  # chromosome length
        rep_1_read_1.template_name = tests.util.template_name_str([1, 1])
        rep_1_read_2 = tests.bam.Read()
        error = tests.bam.set_read_pair_from_intervals(
            rep_1_read_1, rep_1_read_2, [[76, 100], [201, 300]],
            [[401, 475]], self._read_length)
        self.assertFalse(error)
        rep_1_bam.reads = [rep_1_read_1, rep_1_read_2]

        rep_2_read_1 = tests.bam.Read()
        rep_2_read_1.ref_seq_name = '1'  # chromosome
        rep_2_read_1.ref_seq_len = 1000  # chromosome length
        rep_2_read_1.template_name = tests.util.template_name_str([1, 2])
        rep_2_read_2 = tests.bam.Read()
        error = tests.bam.set_read_pair_from_intervals(
            rep_2_read_1, rep_2_read_2, [[26, 100]],
            [[201, 300], [401, 425]], self._read_length)
        self.assertFalse(error)
        rep_2_bam.reads = [rep_2_read_1, rep_2_read_2]

        self._write_bams(sample_1_bams, sample_1_bams_path)
        return sample_1_bams

    def _create_sample_2_bams(self, sample_2_bams_path,
                              sample_2_replicate_template):
        rep_1_bam = tests.bam.BAM()
        rep_1_bam.path = sample_2_replicate_template.format(1)
        rep_2_bam = tests.bam.BAM()
        rep_2_bam.path = sample_2_replicate_template.format(2)
        sample_2_bams = [rep_1_bam, rep_2_bam]

        rep_1_read_1 = tests.bam.Read()
        rep_1_read_1.ref_seq_name = '1'  # chromosome
        rep_1_read_1.ref_seq_len = 1000  # chromosome length
        rep_1_read_1.template_name = tests.util.template_name_str([2, 1])
        rep_1_read_2 = tests.bam.Read()
        error = tests.bam.set_read_pair_from_intervals(
            rep_1_read_1, rep_1_read_2, [[76, 100], [401, 500]],
            [[401, 475]], self._read_length)
        self.assertFalse(error)
        rep_1_bam.reads = [rep_1_read_1, rep_1_read_2]

        rep_2_read_1 = tests.bam.Read()
        rep_2_read_1.ref_seq_name = '1'  # chromosome
        rep_2_read_1.ref_seq_len = 1000  # chromosome length
        rep_2_read_1.template_name = tests.util.template_name_str([2, 2])
        rep_2_read_2 = tests.bam.Read()
        error = tests.bam.set_read_pair_from_intervals(
            rep_2_read_1, rep_2_read_2, [[26, 100]],
            [[1, 100], [401, 425]], self._read_length)
        self.assertFalse(error)
        rep_2_bam.reads = [rep_2_read_1, rep_2_read_2]

        self._write_bams(sample_2_bams, sample_2_bams_path)
        return sample_2_bams

    def _cp_with_prefix(self, prefix, source_dir, dest_dir):
        source_paths = self._get_dot_rmats_paths(source_dir)
        command = [
            sys.executable, tests.test_config.CP_WITH_PREFIX, prefix, dest_dir
        ]
        command.extend(source_paths)
        subprocess.run(command,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE,
                       check=True)
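    # _run_test() (from tests.base_test.BaseTest) runs rMATS with the
    # arguments from _rmats_arguments() and then calls back into
    # _check_results(), which dispatches on the current sub-step.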
    def _check_results(self):
        if self._sub_step == 'prep_1':
            self._check_results_prep_1()
        elif self._sub_step == 'inte_1_fail':
            self._check_results_inte_1_fail()
        elif self._sub_step == 'inte_1_pass':
            self._check_results_inte_1_pass()
        elif self._sub_step == 'prep_2':
            self._check_results_prep_2()
        elif self._sub_step == 'inte_2_fail':
            self._check_results_inte_2_fail()
        elif self._sub_step == 'inte_2_pass':
            self._check_results_inte_2_pass()
        elif self._sub_step == 'post':
            self._check_results_post()
        elif self._sub_step == 'duplicate_input_bam':
            self._check_results_dup_input_bam()
        elif self._sub_step == 'duplicate_prep_bam':
            self._check_results_dup_prep_bam()
        elif self._sub_step == 'missing_input_bam':
            self._check_results_miss_input_bam()
        elif self._sub_step == 'missing_prep_bam':
            self._check_results_miss_prep_bam()
        else:
            self.fail('unexpected sub_step: {}'.format(self._sub_step))

    def _get_dot_rmats_paths(self, tmp_dir):
        dot_rmats_file_paths = glob.glob(os.path.join(tmp_dir, '*.rmats'))
        # filenames begin with a timestamp used for alphanumeric sort
        return sorted(dot_rmats_file_paths)

    def _check_results_prep_1(self):
        self._check_no_error_results()
        command_stdout_file_name = self._get_stdout_file_name()
        with open(command_stdout_file_name, 'rt') as out_f_h:
            out_lines = out_f_h.readlines()

        tests.util.assert_no_line_has(self, out_lines,
                                      'Processing count files')
        test_gene_id = tests.util.gene_id_str(1)
        quoted_test_gene_id = tests.util.double_quote(test_gene_id)
        dot_rmats_paths = self._get_dot_rmats_paths(self._prep_1_tmp_dir)
        self.assertEqual(len(dot_rmats_paths), 2)
        for dot_rmats_i in range(2):
            dot_rmats_contents, error = output_parser.parse_dot_rmats(
                dot_rmats_paths[dot_rmats_i])
            self.assertFalse(error)
            self.assertEqual(dot_rmats_contents['bams'],
                             [self._sample_1_bams[dot_rmats_i].path])
            self.assertEqual(dot_rmats_contents['read_length'],
                             self._read_length)
            novel_juncs = dot_rmats_contents['novel_juncs']
            self.assertEqual(novel_juncs, [dict()])
            exons = dot_rmats_contents['exons']
            if dot_rmats_i == 0:
                self.assertEqual(exons, [{
                    quoted_test_gene_id: [{
                        'start_box': [401, 499],
                        'end_box': [401, 499],
                        'counts': [1, 0]
                    }]
                }])
            else:
                self.assertEqual(exons, [{
                    quoted_test_gene_id: [{
                        'start_box': [1, 99],
                        'end_box': [1, 99],
                        'counts': [1, 0]
                    }]
                }])
            multis = dot_rmats_contents['multis']
            if dot_rmats_i == 0:
                self.assertEqual(multis, [{
                    quoted_test_gene_id: [{
                        'junction_pairs': [[1, 1], [100, 200], [299, 299]],
                        'count': 1
                    }]
                }])
            else:
                self.assertEqual(multis, [{
                    quoted_test_gene_id: [{
                        'junction_pairs': [[201, 201], [300, 400], [499, 499]],
                        'count': 1
                    }]
                }])

        self._cp_with_prefix('prep_1_', self._prep_1_tmp_dir,
                             self._post_tmp_dir)
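    # Mirrors _check_results_prep_1 for sample 2. Here the exon1-exon3 skip
    # junction is not annotated in the GTF, so it is reported under
    # 'novel_juncs', and both replicates record the same junction pair.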
    def _check_results_prep_2(self):
        self._check_no_error_results()
        command_stdout_file_name = self._get_stdout_file_name()
        with open(command_stdout_file_name, 'rt') as out_f_h:
            out_lines = out_f_h.readlines()

        tests.util.assert_no_line_has(self, out_lines,
                                      'Processing count files')
        test_gene_id = tests.util.gene_id_str(1)
        quoted_test_gene_id = tests.util.double_quote(test_gene_id)
        dot_rmats_paths = self._get_dot_rmats_paths(self._prep_2_tmp_dir)
        self.assertEqual(len(dot_rmats_paths), 2)
        for dot_rmats_i in range(2):
            dot_rmats_contents, error = output_parser.parse_dot_rmats(
                dot_rmats_paths[dot_rmats_i])
            self.assertFalse(error)
            self.assertEqual(dot_rmats_contents['bams'],
                             [self._sample_2_bams[dot_rmats_i].path])
            self.assertEqual(dot_rmats_contents['read_length'],
                             self._read_length)
            novel_juncs = dot_rmats_contents['novel_juncs']
            self.assertEqual(novel_juncs,
                             [{quoted_test_gene_id: [[0, 0, 2]]}])
            exons = dot_rmats_contents['exons']
            if dot_rmats_i == 0:
                self.assertEqual(exons, [{
                    quoted_test_gene_id: [{
                        'start_box': [401, 499],
                        'end_box': [401, 499],
                        'counts': [1, 0]
                    }]
                }])
            else:
                self.assertEqual(exons, [{
                    quoted_test_gene_id: [{
                        'start_box': [1, 99],
                        'end_box': [1, 99],
                        'counts': [1, 0]
                    }]
                }])
            multis = dot_rmats_contents['multis']
            if dot_rmats_i == 0:
                self.assertEqual(multis, [{
                    quoted_test_gene_id: [{
                        'junction_pairs': [[1, 1], [100, 400], [499, 499]],
                        'count': 1
                    }]
                }])
            else:
                self.assertEqual(multis, [{
                    quoted_test_gene_id: [{
                        'junction_pairs': [[1, 1], [100, 400], [499, 499]],
                        'count': 1
                    }]
                }])

        self._cp_with_prefix('prep_2_', self._prep_2_tmp_dir,
                             self._post_tmp_dir)

    def _check_results_inte_1_fail(self):
        self.assertNotEqual(self._rmats_return_code, 0)
        command_stderr_file_name = self._get_stderr_file_name()
        with open(command_stderr_file_name, 'rt') as err_f_h:
            err_lines = err_f_h.readlines()

        tests.util.assert_some_line_has(
            self, err_lines, 'input bam files with no associated prep output')

    def _check_results_inte_1_pass(self):
        self._check_no_error_results()

    def _check_results_inte_2_fail(self):
        self.assertNotEqual(self._rmats_return_code, 0)
        command_stderr_file_name = self._get_stderr_file_name()
        with open(command_stderr_file_name, 'rt') as err_f_h:
            err_lines = err_f_h.readlines()

        tests.util.assert_some_line_has(
            self, err_lines,
            'bam files not in input but associated with prep output')

    def _check_results_inte_2_pass(self):
        self._check_no_error_results()
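    # 'post' combines the prefixed .rmats files copied from both prep runs:
    # one SE event with inclusion counts 1,1 vs 0,0 and skip counts 0,0 vs
    # 1,1, giving an IncLevelDifference of 1.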
    def _check_results_post(self):
        self._check_no_error_results()
        command_stdout_file_name = self._get_stdout_file_name()
        with open(command_stdout_file_name, 'rt') as out_f_h:
            out_lines = out_f_h.readlines()

        tests.util.assert_some_line_has(self, out_lines,
                                        'Processing count files')
        from_gtf_se_path = os.path.join(self._out_dir, 'fromGTF.SE.txt')
        from_gtf_se_header, from_gtf_se_rows, error = output_parser.parse_from_gtf(
            from_gtf_se_path)
        self.assertFalse(error)
        self.assertEqual(len(from_gtf_se_rows), 1)
        from_gtf_se_row = from_gtf_se_rows[0]
        self.assertEqual(from_gtf_se_row['GeneID'],
                         tests.util.double_quote(tests.util.gene_id_str(1)))
        self.assertEqual(from_gtf_se_row['exonStart_0base'], '200')
        self.assertEqual(from_gtf_se_row['exonEnd'], '300')

        jc_raw_se_path = os.path.join(self._out_dir, 'JC.raw.input.SE.txt')
        jc_raw_se_header, jc_raw_se_rows, error = output_parser.parse_jc_raw(
            jc_raw_se_path)
        self.assertFalse(error)
        self.assertEqual(len(jc_raw_se_rows), 1)
        jc_raw_se_row = jc_raw_se_rows[0]
        self.assertEqual(jc_raw_se_row['ID'], from_gtf_se_row['ID'])
        self.assertEqual(jc_raw_se_row['IJC_SAMPLE_1'], '1,1')
        self.assertEqual(jc_raw_se_row['SJC_SAMPLE_1'], '0,0')
        self.assertEqual(jc_raw_se_row['IJC_SAMPLE_2'], '0,0')
        self.assertEqual(jc_raw_se_row['SJC_SAMPLE_2'], '1,1')

        se_mats_jc_path = os.path.join(self._out_dir, 'SE.MATS.JC.txt')
        se_mats_jc_header, se_mats_jc_rows, error = output_parser.parse_mats_jc(
            se_mats_jc_path)
        self.assertFalse(error)
        self._check_se_mats_jc_header(se_mats_jc_header)
        self.assertEqual(len(se_mats_jc_rows), 1)
        se_mats_jc_row = se_mats_jc_rows[0]
        pvalue = float(se_mats_jc_row['PValue'])
        tests.util.assert_within_bounds(self, pvalue, 0, 1)
        fdr = float(se_mats_jc_row['FDR'])
        tests.util.assert_within_bounds(self, fdr, 0, 1)
        inc_level_1_splits = se_mats_jc_row['IncLevel1'].split(',')
        self.assertEqual(len(inc_level_1_splits), 2)
        self.assertAlmostEqual(float(inc_level_1_splits[0]), 1)
        self.assertAlmostEqual(float(inc_level_1_splits[1]), 1)
        inc_level_2_splits = se_mats_jc_row['IncLevel2'].split(',')
        self.assertEqual(len(inc_level_2_splits), 2)
        self.assertAlmostEqual(float(inc_level_2_splits[0]), 0)
        self.assertAlmostEqual(float(inc_level_2_splits[1]), 0)
        self.assertAlmostEqual(float(se_mats_jc_row['IncLevelDifference']), 1)

    def _check_results_dup_input_bam(self):
        self.assertNotEqual(self._rmats_return_code, 0)
        command_stderr_file_name = self._get_stderr_file_name()
        with open(command_stderr_file_name, 'rt') as err_f_h:
            err_lines = err_f_h.readlines()

        dup_bam_path = self._sample_1_bams[0].path
        expected_error = '{} given 2 times'.format(dup_bam_path)
        tests.util.assert_some_line_has(self, err_lines, expected_error)

    def _check_results_dup_prep_bam(self):
        self.assertNotEqual(self._rmats_return_code, 0)
        command_stderr_file_name = self._get_stderr_file_name()
        with open(command_stderr_file_name, 'rt') as err_f_h:
            err_lines = err_f_h.readlines()

        for bam in self._sample_1_bams:
            dup_bam_path = bam.path
            expected_error = '{} found 2 times in .rmats'.format(dup_bam_path)
            tests.util.assert_some_line_has(self, err_lines, expected_error)

    def _check_results_miss_input_bam(self):
        self._check_no_error_results()

    def _check_results_miss_prep_bam(self):
        self.assertNotEqual(self._rmats_return_code, 0)
        command_stderr_file_name = self._get_stderr_file_name()
        with open(command_stderr_file_name, 'rt') as err_f_h:
            err_lines = err_f_h.readlines()

        for bam in self._sample_2_bams:
            miss_bam_path = bam.path
            expected_error = '{} not found in .rmats'.format(miss_bam_path)
            tests.util.assert_some_line_has(self, err_lines, expected_error)
self.assertEqual(from_gtf_se_row['exonStart_0base'], '200') self.assertEqual(from_gtf_se_row['exonEnd'], '300') jc_raw_se_path = os.path.join(self._out_dir, 'JC.raw.input.SE.txt')", "elif self._sub_step == 'duplicate_input_bam': arguments.extend([ '--tmp', self._dup_input_bam_tmp_dir, '--b1', self._dup_input_bam_path, '--task',", "'--task', 'inte', '--statoff', ]) elif self._sub_step == 'inte_2_pass': arguments.extend([ '--tmp',", "1000 # chromosome length rep_2_read_1.template_name = tests.util.template_name_str([2, 2]) rep_2_read_2 =", "= tests.util.double_quote(test_gene_id) dot_rmats_paths = self._get_dot_rmats_paths(self._prep_1_tmp_dir) self.assertEqual(len(dot_rmats_paths), 2) for dot_rmats_i in", "self.assertFalse(error) self._check_se_mats_jc_header(se_mats_jc_header) self.assertEqual(len(se_mats_jc_rows), 1) se_mats_jc_row = se_mats_jc_rows[0] pvalue = float(se_mats_jc_row['PValue'])", "tests.test_config.CP_WITH_PREFIX, prefix, dest_dir ] command.extend(source_paths) subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) def", "'--tmp', self._prep_2_tmp_dir, '--b1', self._sample_2_bams_path, '--task', 'prep', ]) elif self._sub_step ==", "= se_mats_jc_row['IncLevel1'].split(',') self.assertEqual(len(inc_level_1_splits), 2) self.assertAlmostEqual(float(inc_level_1_splits[0]), 1) self.assertAlmostEqual(float(inc_level_1_splits[1]), 1) inc_level_2_splits =", "tests.gtf.Transcript() transcript_1.chromosome = '1' transcript_1.strand = '+' transcript_1.gene_id = tests.util.gene_id_str(1)", "fdr, 0, 1) inc_level_1_splits = se_mats_jc_row['IncLevel1'].split(',') self.assertEqual(len(inc_level_1_splits), 2) self.assertAlmostEqual(float(inc_level_1_splits[0]), 1)", "rep_1_bam.reads = [rep_1_read_1, rep_1_read_2] rep_2_read_1 = tests.bam.Read() rep_2_read_1.ref_seq_name = '1'", "== 'missing_input_bam': self._setup_miss_input_bam() elif self._sub_step == 'missing_prep_bam': self._setup_miss_prep_bam() def _setup_dup_input_bam(self):", "out_lines = out_f_h.readlines() tests.util.assert_no_line_has(self, out_lines, 'Processing count files') test_gene_id =", "tests.bam.Read() rep_2_read_1.ref_seq_name = '1' # chromosome rep_2_read_1.ref_seq_len = 1000 #", "os.path.join(self._test_base_dir, 'prep_post') self._generated_input_dir = os.path.join(self._test_dir, 'generated_input') self._out_dir = os.path.join(self._test_dir, 'out')", "elif self._sub_step == 'post': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2',", "300], [401, 425]], self._read_length) self.assertFalse(error) rep_2_bam.reads = [rep_2_read_1, rep_2_read_2] self._write_bams(sample_1_bams,", "os.path.join(self._generated_input_dir, 'dup_prep.txt') bams = self._sample_1_bams self._write_bams(bams, self._dup_prep_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir)", "self._sub_step == 'prep_2': self._check_results_prep_2() elif self._sub_step == 'inte_2_fail': self._check_results_inte_2_fail() elif", "= out_f_h.readlines() tests.util.assert_some_line_has(self, out_lines, 'Processing count files') from_gtf_se_path = os.path.join(self._out_dir,", "arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2', self._sample_2_bams_path, '--task', 'inte', ])", "1 }] }]) self._cp_with_prefix('prep_1_', self._prep_1_tmp_dir, self._post_tmp_dir) def _check_results_prep_2(self): self._check_no_error_results() command_stdout_file_name", "'rt') as err_f_h: err_lines = err_f_h.readlines() 
tests.util.assert_some_line_has( self, err_lines, 'input", "jc_raw_se_row = jc_raw_se_rows[0] self.assertEqual(jc_raw_se_row['ID'], from_gtf_se_row['ID']) self.assertEqual(jc_raw_se_row['IJC_SAMPLE_1'], '1,1') self.assertEqual(jc_raw_se_row['SJC_SAMPLE_1'], '0,0') self.assertEqual(jc_raw_se_row['IJC_SAMPLE_2'],", "= output_parser.parse_from_gtf( from_gtf_se_path) self.assertFalse(error) self.assertEqual(len(from_gtf_se_rows), 1) from_gtf_se_row = from_gtf_se_rows[0] self.assertEqual(from_gtf_se_row['GeneID'],", "self._check_results_dup_prep_bam() elif self._sub_step == 'missing_input_bam': self._check_results_miss_input_bam() elif self._sub_step == 'missing_prep_bam':", "= tests.util.double_quote(test_gene_id) dot_rmats_paths = self._get_dot_rmats_paths(self._prep_2_tmp_dir) self.assertEqual(len(dot_rmats_paths), 2) for dot_rmats_i in", "with open(command_stdout_file_name, 'rt') as out_f_h: out_lines = out_f_h.readlines() tests.util.assert_some_line_has(self, out_lines,", "if self._sub_step == 'prep_1': self._check_results_prep_1() elif self._sub_step == 'inte_1_fail': self._check_results_inte_1_fail()", "'post', '--statoff', ]) return arguments def _setup_sub_step(self): if self._sub_step ==", "self._sub_step == 'duplicate_input_bam': self._check_results_dup_input_bam() elif self._sub_step == 'duplicate_prep_bam': self._check_results_dup_prep_bam() elif", "= err_f_h.readlines() for bam in self._sample_2_bams: miss_bam_path = bam.path expected_error", "0) self.assertAlmostEqual(float(se_mats_jc_row['IncLevelDifference']), 1) def _check_results_dup_input_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name()", "self._prep_1_tmp_dir, self._miss_input_bam_tmp_dir) def _setup_miss_prep_bam(self): self._miss_prep_bam_path = os.path.join(self._generated_input_dir, 'miss_prep.txt') bams =", "= '+' transcript_1.gene_id = tests.util.gene_id_str(1) transcript_1.gene_name = tests.util.gene_name_str(1) transcript_1.transcript_id =", "self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_prep_bam_tmp_dir) def _create_gtf(self, gtf_path): gtf = tests.gtf.GTF() gtf.path", "rep_2_read_2, [[26, 100]], [[1, 100], [401, 425]], self._read_length) self.assertFalse(error) rep_2_bam.reads", "0] }] }]) multis = dot_rmats_contents['multis'] if dot_rmats_i == 0:", "output_parser import tests.test_config import tests.util class Test(tests.base_test.BaseTest): def setUp(self): super().setUp()", "self._sample_2_bams_path, sample_2_bam_replicate_template) self._gtf_path = os.path.join(self._generated_input_dir, 'test.gtf') self._gtf = self._create_gtf(self._gtf_path) self._sub_steps", "'counts': [1, 0] }] }]) else: self.assertEqual(exons, [{ quoted_test_gene_id: [{", "]) elif self._sub_step == 'post': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path,", "as out_f_h: out_lines = out_f_h.readlines() tests.util.assert_no_line_has(self, out_lines, 'Processing count files')", "'tmp_miss_input_bam') self._miss_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_prep_bam') tests.util.recreate_dirs([ self._generated_input_dir, self._out_dir, self._prep_1_tmp_dir, self._prep_2_tmp_dir,", "self._out_dir, self._prep_1_tmp_dir, self._prep_2_tmp_dir, self._post_tmp_dir, self._dup_input_bam_tmp_dir, self._dup_prep_bam_tmp_dir, self._miss_input_bam_tmp_dir, self._miss_prep_bam_tmp_dir, self._command_output_dir() ])", "elif self._sub_step == 'inte_1_pass': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', 
self._sample_1_bams_path, '--task',", "[rep_2_read_1, rep_2_read_2] self._write_bams(sample_1_bams, sample_1_bams_path) return sample_1_bams def _create_sample_2_bams(self, sample_2_bams_path, sample_2_replicate_template):", "tests.util.template_name_str([1, 2]) rep_2_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals( rep_2_read_1, rep_2_read_2,", "1], [100, 400], [499, 499]], 'count': 1 }] }]) self._cp_with_prefix('prep_2_',", "_check_results_inte_1_pass(self): self._check_no_error_results() def _check_results_inte_2_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with", "glob import os.path import subprocess import sys import unittest import", "miss_bam_path = bam.path expected_error = '{} not found in .rmats'.format(miss_bam_path)", "= self._sample_1_bams + [self._sample_1_bams[0]] self._write_bams(bams, self._dup_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._dup_input_bam_tmp_dir) def", "chromosome rep_2_read_1.ref_seq_len = 1000 # chromosome length rep_2_read_1.template_name = tests.util.template_name_str([1,", "tests.util.template_name_str([1, 1]) rep_1_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_1_read_1, rep_1_read_2, [[76,", "transcript_1.gene_name = tests.util.gene_name_str(1) transcript_1.transcript_id = tests.util.transcript_id_str(1) transcript_1.exons = [(1, 100),", "[300, 400], [499, 499]], 'count': 1 }] }]) self._cp_with_prefix('prep_1_', self._prep_1_tmp_dir,", "jc_raw_se_path) self.assertFalse(error) self.assertEqual(len(jc_raw_se_rows), 1) jc_raw_se_row = jc_raw_se_rows[0] self.assertEqual(jc_raw_se_row['ID'], from_gtf_se_row['ID']) self.assertEqual(jc_raw_se_row['IJC_SAMPLE_1'],", "== 'duplicate_prep_bam': arguments.extend([ '--tmp', self._dup_prep_bam_tmp_dir, '--b1', self._dup_prep_bam_path, '--task', 'post', '--statoff',", "500]], [[401, 475]], self._read_length) self.assertFalse(error) rep_1_bam.reads = [rep_1_read_1, rep_1_read_2] rep_2_read_1", "arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_2_bams_path, '--task', 'inte', '--statoff', ]) elif", "self._dup_input_bam_tmp_dir) def _setup_dup_prep_bam(self): self._dup_prep_bam_path = os.path.join(self._generated_input_dir, 'dup_prep.txt') bams = self._sample_1_bams", "self._miss_input_bam_tmp_dir, self._miss_prep_bam_tmp_dir, self._command_output_dir() ]) self._read_type = 'paired' self._read_length = 50", "]) elif self._sub_step == 'inte_1_pass': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path,", "dup_bam_path = bam.path expected_error = '{} found 2 times in", "for dot_rmats_i in range(2): dot_rmats_contents, error = output_parser.parse_dot_rmats( dot_rmats_paths[dot_rmats_i]) self.assertFalse(error)", "== 'missing_input_bam': arguments.extend([ '--tmp', self._miss_input_bam_tmp_dir, '--b1', self._miss_input_bam_path, '--task', 'post', '--statoff',", "'prep_1', 'inte_1_fail', 'inte_1_pass', 'prep_2', 'inte_2_fail', 'inte_2_pass', 'post', 'duplicate_input_bam', 'duplicate_prep_bam', 'missing_input_bam',", "self._write_bams(bams, self._miss_prep_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_prep_bam_tmp_dir) def _create_gtf(self, gtf_path): gtf =", "self.assertEqual(jc_raw_se_row['ID'], from_gtf_se_row['ID']) self.assertEqual(jc_raw_se_row['IJC_SAMPLE_1'], '1,1') self.assertEqual(jc_raw_se_row['SJC_SAMPLE_1'], '0,0') self.assertEqual(jc_raw_se_row['IJC_SAMPLE_2'], '0,0') 
self.assertEqual(jc_raw_se_row['SJC_SAMPLE_2'], '1,1')", "sample_2_bams def _cp_with_prefix(self, prefix, source_dir, dest_dir): source_paths = self._get_dot_rmats_paths(source_dir) command", "self._cp_with_prefix('prep_1_again', self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir) def _setup_miss_input_bam(self): self._miss_input_bam_path = os.path.join(self._generated_input_dir, 'miss_input.txt') bams", "rep_2_bam = tests.bam.BAM() rep_2_bam.path = sample_2_replicate_template.format(2) sample_2_bams = [rep_1_bam, rep_2_bam]", "self.assertFalse(error) self.assertEqual(len(jc_raw_se_rows), 1) jc_raw_se_row = jc_raw_se_rows[0] self.assertEqual(jc_raw_se_row['ID'], from_gtf_se_row['ID']) self.assertEqual(jc_raw_se_row['IJC_SAMPLE_1'], '1,1')", "rep_2_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals( rep_2_read_1, rep_2_read_2, [[26, 100]],", "'post', ]) elif self._sub_step == 'duplicate_input_bam': arguments.extend([ '--tmp', self._dup_input_bam_tmp_dir, '--b1',", "from_gtf_se_header, from_gtf_se_rows, error = output_parser.parse_from_gtf( from_gtf_se_path) self.assertFalse(error) self.assertEqual(len(from_gtf_se_rows), 1) from_gtf_se_row", "'tmp_miss_prep_bam') tests.util.recreate_dirs([ self._generated_input_dir, self._out_dir, self._prep_1_tmp_dir, self._prep_2_tmp_dir, self._post_tmp_dir, self._dup_input_bam_tmp_dir, self._dup_prep_bam_tmp_dir, self._miss_input_bam_tmp_dir,", "'miss_prep.txt') bams = self._sample_1_bams + self._sample_2_bams self._write_bams(bams, self._miss_prep_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir,", "self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_input_bam_tmp_dir) def _setup_miss_prep_bam(self): self._miss_prep_bam_path = os.path.join(self._generated_input_dir, 'miss_prep.txt') bams", "sample_1_bams = [rep_1_bam, rep_2_bam] rep_1_read_1 = tests.bam.Read() rep_1_read_1.ref_seq_name = '1'", "exons = dot_rmats_contents['exons'] if dot_rmats_i == 0: self.assertEqual(exons, [{ quoted_test_gene_id:", "[{quoted_test_gene_id: [[0, 0, 2]]}]) exons = dot_rmats_contents['exons'] if dot_rmats_i ==", "1) def _check_results_dup_input_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name,", "bam.path expected_error = '{} not found in .rmats'.format(miss_bam_path) tests.util.assert_some_line_has(self, err_lines,", "400], [499, 499]], 'count': 1 }] }]) self._cp_with_prefix('prep_1_', self._prep_1_tmp_dir, self._post_tmp_dir)", "[[1, 1], [100, 400], [499, 499]], 'count': 1 }] }])", "def _check_results_inte_2_pass(self): self._check_no_error_results() def _check_results_post(self): self._check_no_error_results() command_stdout_file_name = self._get_stdout_file_name() with", "err_f_h: err_lines = err_f_h.readlines() for bam in self._sample_2_bams: miss_bam_path =", "prefix, dest_dir ] command.extend(source_paths) subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) def _check_results(self):", "= '{} found 2 times in .rmats'.format(dup_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error)", "'--gtf', self._gtf_path, '--od', self._out_dir, '-t', self._read_type, '--readLength', str(self._read_length), ] if", "err_f_h: err_lines = err_f_h.readlines() tests.util.assert_some_line_has( self, err_lines, 'bam files not", "self._dup_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_dup_prep_bam') self._miss_input_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_input_bam') 
self._miss_prep_bam_tmp_dir =", "self._generated_input_dir, 'sample_1_rep_{}.bam') sample_2_bam_replicate_template = os.path.join( self._generated_input_dir, 'sample_2_rep_{}.bam') self._sample_1_bams = self._create_sample_1_bams(", "rep_2_bam = tests.bam.BAM() rep_2_bam.path = sample_1_replicate_template.format(2) sample_1_bams = [rep_1_bam, rep_2_bam]", "str(self._read_length), ] if self._sub_step == 'prep_1': arguments.extend([ '--tmp', self._prep_1_tmp_dir, '--b1',", "rep_1_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_1_read_1, rep_1_read_2, [[76, 100], [201,", "'tmp_dup_prep_bam') self._miss_input_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_input_bam') self._miss_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_prep_bam') tests.util.recreate_dirs([", "dest_dir ] command.extend(source_paths) subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) def _check_results(self): if", "self.assertEqual(len(dot_rmats_paths), 2) for dot_rmats_i in range(2): dot_rmats_contents, error = output_parser.parse_dot_rmats(", "as out_f_h: out_lines = out_f_h.readlines() tests.util.assert_some_line_has(self, out_lines, 'Processing count files')", "= bam.path expected_error = '{} not found in .rmats'.format(miss_bam_path) tests.util.assert_some_line_has(self,", "self.assertFalse(error) self.assertEqual(len(from_gtf_se_rows), 1) from_gtf_se_row = from_gtf_se_rows[0] self.assertEqual(from_gtf_se_row['GeneID'], tests.util.double_quote(tests.util.gene_id_str(1))) self.assertEqual(from_gtf_se_row['exonStart_0base'], '200')", "= self._create_sample_2_bams( self._sample_2_bams_path, sample_2_bam_replicate_template) self._gtf_path = os.path.join(self._generated_input_dir, 'test.gtf') self._gtf =", "elif self._sub_step == 'duplicate_prep_bam': self._setup_dup_prep_bam() elif self._sub_step == 'missing_input_bam': self._setup_miss_input_bam()", "elif self._sub_step == 'inte_1_fail': self._check_results_inte_1_fail() elif self._sub_step == 'inte_1_pass': self._check_results_inte_1_pass()", "== 'inte_2_pass': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2', self._sample_2_bams_path, '--task',", "[[401, 475]], self._read_length) self.assertFalse(error) rep_1_bam.reads = [rep_1_read_1, rep_1_read_2] rep_2_read_1 =", "[self._sample_2_bams[dot_rmats_i].path]) self.assertEqual(dot_rmats_contents['read_length'], self._read_length) novel_juncs = dot_rmats_contents['novel_juncs'] self.assertEqual(novel_juncs, [{quoted_test_gene_id: [[0, 0,", "dot_rmats_contents['novel_juncs'] self.assertEqual(novel_juncs, [dict()]) exons = dot_rmats_contents['exons'] if dot_rmats_i == 0:", "== 'duplicate_input_bam': arguments.extend([ '--tmp', self._dup_input_bam_tmp_dir, '--b1', self._dup_input_bam_path, '--task', 'post', '--statoff',", "= dot_rmats_contents['novel_juncs'] self.assertEqual(novel_juncs, [dict()]) exons = dot_rmats_contents['exons'] if dot_rmats_i ==", "[1, 99], 'counts': [1, 0] }] }]) multis = dot_rmats_contents['multis']", "self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir) def _setup_miss_input_bam(self): self._miss_input_bam_path = os.path.join(self._generated_input_dir, 'miss_input.txt') bams =", "'rt') as err_f_h: err_lines = err_f_h.readlines() for bam in self._sample_2_bams:", "499], 'end_box': [401, 499], 'counts': [1, 0] }] }]) else:", "'counts': [1, 0] }] }]) multis = dot_rmats_contents['multis'] if dot_rmats_i", "'SE.MATS.JC.txt') se_mats_jc_header, se_mats_jc_rows, error = 
output_parser.parse_mats_jc( se_mats_jc_path) self.assertFalse(error) self._check_se_mats_jc_header(se_mats_jc_header) self.assertEqual(len(se_mats_jc_rows),", "self._write_bams(bams, self._dup_prep_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir) self._cp_with_prefix('prep_1_again', self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir) def _setup_miss_input_bam(self):", "files') from_gtf_se_path = os.path.join(self._out_dir, 'fromGTF.SE.txt') from_gtf_se_header, from_gtf_se_rows, error = output_parser.parse_from_gtf(", "rep_1_read_2, [[76, 100], [401, 500]], [[401, 475]], self._read_length) self.assertFalse(error) rep_1_bam.reads", "given 2 times'.format(dup_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error) def _check_results_dup_prep_bam(self): self.assertNotEqual(self._rmats_return_code, 0)", "# chromosome length rep_2_read_1.template_name = tests.util.template_name_str([2, 2]) rep_2_read_2 = tests.bam.Read()", "rep_2_bam] rep_1_read_1 = tests.bam.Read() rep_1_read_1.ref_seq_name = '1' # chromosome rep_1_read_1.ref_seq_len", "self._sample_2_bams_path, '--task', 'inte', ]) elif self._sub_step == 'post': arguments.extend([ '--tmp',", "def _rmats_arguments(self): arguments = [ '--gtf', self._gtf_path, '--od', self._out_dir, '-t',", "499]], 'count': 1 }] }]) self._cp_with_prefix('prep_1_', self._prep_1_tmp_dir, self._post_tmp_dir) def _check_results_prep_2(self):", "self, err_lines, 'bam files not in input but associated with", "1) self.assertAlmostEqual(float(inc_level_1_splits[1]), 1) inc_level_2_splits = se_mats_jc_row['IncLevel2'].split(',') self.assertEqual(len(inc_level_2_splits), 2) self.assertAlmostEqual(float(inc_level_2_splits[0]), 0)", "== 'post': self._check_results_post() elif self._sub_step == 'duplicate_input_bam': self._check_results_dup_input_bam() elif self._sub_step", "rep_2_read_1.ref_seq_len = 1000 # chromosome length rep_2_read_1.template_name = tests.util.template_name_str([2, 2])", "error = tests.bam.set_read_pair_from_intervals(rep_2_read_1, rep_2_read_2, [[26, 100]], [[1, 100], [401, 425]],", "_check_results_inte_2_pass(self): self._check_no_error_results() def _check_results_post(self): self._check_no_error_results() command_stdout_file_name = self._get_stdout_file_name() with open(command_stdout_file_name,", "elif self._sub_step == 'missing_prep_bam': arguments.extend([ '--tmp', self._miss_prep_bam_tmp_dir, '--b1', self._miss_prep_bam_path, '--task',", "'1,1') self.assertEqual(jc_raw_se_row['SJC_SAMPLE_1'], '0,0') self.assertEqual(jc_raw_se_row['IJC_SAMPLE_2'], '0,0') self.assertEqual(jc_raw_se_row['SJC_SAMPLE_2'], '1,1') se_mats_jc_path = os.path.join(self._out_dir,", "self.assertEqual(len(se_mats_jc_rows), 1) se_mats_jc_row = se_mats_jc_rows[0] pvalue = float(se_mats_jc_row['PValue']) tests.util.assert_within_bounds(self, pvalue,", "float(se_mats_jc_row['FDR']) tests.util.assert_within_bounds(self, fdr, 0, 1) inc_level_1_splits = se_mats_jc_row['IncLevel1'].split(',') self.assertEqual(len(inc_level_1_splits), 2)", "'inte', '--statoff', ]) elif self._sub_step == 'prep_2': arguments.extend([ '--tmp', self._prep_2_tmp_dir,", "self._prep_1_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_1') self._prep_2_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_2') self._post_tmp_dir =", "= [transcript_1] error = gtf.write() self.assertFalse(error) return gtf def _create_sample_1_bams(self,", "= self._get_stdout_file_name() with open(command_stdout_file_name, 'rt') as out_f_h: out_lines = out_f_h.readlines()", 
"dot_rmats_i in range(2): dot_rmats_contents, error = output_parser.parse_dot_rmats( dot_rmats_paths[dot_rmats_i]) self.assertFalse(error) self.assertEqual(dot_rmats_contents['bams'],", "dot_rmats_i == 0: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[1, 1],", "= jc_raw_se_rows[0] self.assertEqual(jc_raw_se_row['ID'], from_gtf_se_row['ID']) self.assertEqual(jc_raw_se_row['IJC_SAMPLE_1'], '1,1') self.assertEqual(jc_raw_se_row['SJC_SAMPLE_1'], '0,0') self.assertEqual(jc_raw_se_row['IJC_SAMPLE_2'], '0,0')", "= se_mats_jc_rows[0] pvalue = float(se_mats_jc_row['PValue']) tests.util.assert_within_bounds(self, pvalue, 0, 1) fdr", "tests.util.gene_name_str(1) transcript_1.transcript_id = tests.util.transcript_id_str(1) transcript_1.exons = [(1, 100), (201, 300),", "rep_1_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_1_read_1, rep_1_read_2, [[76, 100], [401,", "[401, 425]], self._read_length) self.assertFalse(error) rep_2_bam.reads = [rep_2_read_1, rep_2_read_2] self._write_bams(sample_2_bams, sample_2_bams_path)", "self._generated_input_dir, 'sample_2_rep_{}.bam') self._sample_1_bams = self._create_sample_1_bams( self._sample_1_bams_path, sample_1_bam_replicate_template) self._sample_2_bams = self._create_sample_2_bams(", "rep_2_read_2, [[26, 100]], [[201, 300], [401, 425]], self._read_length) self.assertFalse(error) rep_2_bam.reads", "= os.path.join(self._test_dir, 'tmp_prep_2') self._post_tmp_dir = os.path.join(self._test_dir, 'tmp_post') self._dup_input_bam_tmp_dir = os.path.join(self._test_dir,", "'input bam files with no associated prep output') def _check_results_inte_1_pass(self):", ".rmats'.format(dup_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error) def _check_results_miss_input_bam(self): self._check_no_error_results() def _check_results_miss_prep_bam(self): self.assertNotEqual(self._rmats_return_code,", "100), (201, 300), (401, 500)] gtf.transcripts = [transcript_1] error =", "# filenames begin with a timestamp used for alphanumeric sort", "self.assertEqual(jc_raw_se_row['IJC_SAMPLE_1'], '1,1') self.assertEqual(jc_raw_se_row['SJC_SAMPLE_1'], '0,0') self.assertEqual(jc_raw_se_row['IJC_SAMPLE_2'], '0,0') self.assertEqual(jc_raw_se_row['SJC_SAMPLE_2'], '1,1') se_mats_jc_path =", "self._sample_1_bams: dup_bam_path = bam.path expected_error = '{} found 2 times", "in self._sample_2_bams: miss_bam_path = bam.path expected_error = '{} not found", "[{ 'junction_pairs': [[201, 201], [300, 400], [499, 499]], 'count': 1", "== 'prep_2': arguments.extend([ '--tmp', self._prep_2_tmp_dir, '--b1', self._sample_2_bams_path, '--task', 'prep', ])", "'--b1', self._sample_2_bams_path, '--task', 'inte', '--statoff', ]) elif self._sub_step == 'inte_2_pass':", "self._setup_dup_input_bam() elif self._sub_step == 'duplicate_prep_bam': self._setup_dup_prep_bam() elif self._sub_step == 'missing_input_bam':", "self._sub_step == 'missing_input_bam': arguments.extend([ '--tmp', self._miss_input_bam_tmp_dir, '--b1', self._miss_input_bam_path, '--task', 'post',", "= [self._sample_1_bams[0]] self._write_bams(bams, self._miss_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_input_bam_tmp_dir) def _setup_miss_prep_bam(self): self._miss_prep_bam_path", "else: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[1, 1], [100, 400],", "self._check_no_error_results() def _check_results_inte_2_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = 
self._get_stderr_file_name() with open(command_stderr_file_name,", "= '{} given 2 times'.format(dup_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error) def _check_results_dup_prep_bam(self):", "elif self._sub_step == 'inte_2_fail': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_2_bams_path, '--task',", "out_f_h.readlines() tests.util.assert_some_line_has(self, out_lines, 'Processing count files') from_gtf_se_path = os.path.join(self._out_dir, 'fromGTF.SE.txt')", "'JC.raw.input.SE.txt') jc_raw_se_header, jc_raw_se_rows, error = output_parser.parse_jc_raw( jc_raw_se_path) self.assertFalse(error) self.assertEqual(len(jc_raw_se_rows), 1)", "rep_2_read_2] self._write_bams(sample_2_bams, sample_2_bams_path) return sample_2_bams def _cp_with_prefix(self, prefix, source_dir, dest_dir):", "= os.path.join(self._generated_input_dir, 'dup_input.txt') bams = self._sample_1_bams + [self._sample_1_bams[0]] self._write_bams(bams, self._dup_input_bam_path)", "'inte_2_fail': self._check_results_inte_2_fail() elif self._sub_step == 'inte_2_pass': self._check_results_inte_2_pass() elif self._sub_step ==", "= os.path.join(self._generated_input_dir, 'miss_input.txt') bams = [self._sample_1_bams[0]] self._write_bams(bams, self._miss_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir,", "'Processing count files') test_gene_id = tests.util.gene_id_str(1) quoted_test_gene_id = tests.util.double_quote(test_gene_id) dot_rmats_paths", "os.path.join(self._generated_input_dir, 'test.gtf') self._gtf = self._create_gtf(self._gtf_path) self._sub_steps = [ 'prep_1', 'inte_1_fail',", "100], [401, 500]], [[401, 475]], self._read_length) self.assertFalse(error) rep_1_bam.reads = [rep_1_read_1,", "self._dup_prep_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir) self._cp_with_prefix('prep_1_again', self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir) def _setup_miss_input_bam(self): self._miss_input_bam_path", "self._check_no_error_results() def _check_results_miss_prep_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name,", "import unittest import tests.bam import tests.base_test import tests.gtf import tests.output_parser", "'missing_input_bam', 'missing_prep_bam', ] self._sub_step = None def test(self): for sub_step", "os.path.join(self._generated_input_dir, 'dup_input.txt') bams = self._sample_1_bams + [self._sample_1_bams[0]] self._write_bams(bams, self._dup_input_bam_path) self._cp_with_prefix('prep_1',", "_check_results_prep_2(self): self._check_no_error_results() command_stdout_file_name = self._get_stdout_file_name() with open(command_stdout_file_name, 'rt') as out_f_h:", "import tests.output_parser as output_parser import tests.test_config import tests.util class Test(tests.base_test.BaseTest):", "2 times'.format(dup_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error) def _check_results_dup_prep_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name", "[[76, 100], [201, 300]], [[401, 475]], self._read_length) self.assertFalse(error) rep_1_bam.reads =", "se_mats_jc_path = os.path.join(self._out_dir, 'SE.MATS.JC.txt') se_mats_jc_header, se_mats_jc_rows, error = output_parser.parse_mats_jc( se_mats_jc_path)", "= os.path.join(self._test_dir, 'generated_input') self._out_dir = os.path.join(self._test_dir, 'out') self._prep_1_tmp_dir = os.path.join(self._test_dir,", "2) for 
dot_rmats_i in range(2): dot_rmats_contents, error = output_parser.parse_dot_rmats( dot_rmats_paths[dot_rmats_i])", "= tests.gtf.Transcript() transcript_1.chromosome = '1' transcript_1.strand = '+' transcript_1.gene_id =", "[{ quoted_test_gene_id: [{ 'junction_pairs': [[201, 201], [300, 400], [499, 499]],", "= tests.bam.BAM() rep_1_bam.path = sample_2_replicate_template.format(1) rep_2_bam = tests.bam.BAM() rep_2_bam.path =", "tests.util.double_quote(test_gene_id) dot_rmats_paths = self._get_dot_rmats_paths(self._prep_2_tmp_dir) self.assertEqual(len(dot_rmats_paths), 2) for dot_rmats_i in range(2):", "499]], 'count': 1 }] }]) else: self.assertEqual(multis, [{ quoted_test_gene_id: [{", "'post', '--statoff', ]) elif self._sub_step == 'missing_input_bam': arguments.extend([ '--tmp', self._miss_input_bam_tmp_dir,", "rep_1_read_2, [[76, 100], [201, 300]], [[401, 475]], self._read_length) self.assertFalse(error) rep_1_bam.reads", "self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box': [401, 499], 'end_box': [401, 499],", "self._sample_2_bams = self._create_sample_2_bams( self._sample_2_bams_path, sample_2_bam_replicate_template) self._gtf_path = os.path.join(self._generated_input_dir, 'test.gtf') self._gtf", "'rt') as err_f_h: err_lines = err_f_h.readlines() dup_bam_path = self._sample_1_bams[0].path expected_error", "'dup_prep.txt') bams = self._sample_1_bams self._write_bams(bams, self._dup_prep_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir) self._cp_with_prefix('prep_1_again',", "= [rep_1_bam, rep_2_bam] rep_1_read_1 = tests.bam.Read() rep_1_read_1.ref_seq_name = '1' #", "100], [401, 425]], self._read_length) self.assertFalse(error) rep_2_bam.reads = [rep_2_read_1, rep_2_read_2] self._write_bams(sample_2_bams,", "'duplicate_prep_bam', 'missing_input_bam', 'missing_prep_bam', ] self._sub_step = None def test(self): for", "self, err_lines, 'input bam files with no associated prep output')", "self._sample_1_bams_path = os.path.join(self._generated_input_dir, 'b1.txt') self._sample_2_bams_path = os.path.join(self._generated_input_dir, 'b2.txt') sample_1_bam_replicate_template =", "425]], self._read_length) self.assertFalse(error) rep_2_bam.reads = [rep_2_read_1, rep_2_read_2] self._write_bams(sample_1_bams, sample_1_bams_path) return", "if self._sub_step == 'duplicate_input_bam': self._setup_dup_input_bam() elif self._sub_step == 'duplicate_prep_bam': self._setup_dup_prep_bam()", "return gtf def _create_sample_1_bams(self, sample_1_bams_path, sample_1_replicate_template): rep_1_bam = tests.bam.BAM() rep_1_bam.path", "'--b2', self._sample_2_bams_path, '--task', 'inte', ]) elif self._sub_step == 'post': arguments.extend([", "self._sub_steps: self._sub_step = sub_step self._setup_sub_step() self._run_test() def _command_output_dir(self): return os.path.join(self._test_dir,", "self._dup_prep_bam_tmp_dir, '--b1', self._dup_prep_bam_path, '--task', 'post', '--statoff', ]) elif self._sub_step ==", "sample_2_replicate_template.format(2) sample_2_bams = [rep_1_bam, rep_2_bam] rep_1_read_1 = tests.bam.Read() rep_1_read_1.ref_seq_name =", "= sample_2_replicate_template.format(1) rep_2_bam = tests.bam.BAM() rep_2_bam.path = sample_2_replicate_template.format(2) sample_2_bams =", "self._read_type = 'paired' self._read_length = 50 self._sample_1_bams_path = os.path.join(self._generated_input_dir, 'b1.txt')", "self._out_dir, '-t', self._read_type, '--readLength', str(self._read_length), ] if self._sub_step == 'prep_1':", "_get_dot_rmats_paths(self, tmp_dir): 
dot_rmats_file_paths = glob.glob(os.path.join(tmp_dir, '*.rmats')) # filenames begin with", "used for alphanumeric sort return sorted(dot_rmats_file_paths) def _check_results_prep_1(self): self._check_no_error_results() command_stdout_file_name", "[299, 299]], 'count': 1 }] }]) else: self.assertEqual(multis, [{ quoted_test_gene_id:", "self.assertEqual(jc_raw_se_row['SJC_SAMPLE_1'], '0,0') self.assertEqual(jc_raw_se_row['IJC_SAMPLE_2'], '0,0') self.assertEqual(jc_raw_se_row['SJC_SAMPLE_2'], '1,1') se_mats_jc_path = os.path.join(self._out_dir, 'SE.MATS.JC.txt')", "elif self._sub_step == 'prep_2': arguments.extend([ '--tmp', self._prep_2_tmp_dir, '--b1', self._sample_2_bams_path, '--task',", "self._miss_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_prep_bam') tests.util.recreate_dirs([ self._generated_input_dir, self._out_dir, self._prep_1_tmp_dir, self._prep_2_tmp_dir, self._post_tmp_dir,", "= [ 'prep_1', 'inte_1_fail', 'inte_1_pass', 'prep_2', 'inte_2_fail', 'inte_2_pass', 'post', 'duplicate_input_bam',", "}] }]) self._cp_with_prefix('prep_1_', self._prep_1_tmp_dir, self._post_tmp_dir) def _check_results_prep_2(self): self._check_no_error_results() command_stdout_file_name =", "else: self.fail('unexpected sub_step: {}'.format(self._sub_step)) def _get_dot_rmats_paths(self, tmp_dir): dot_rmats_file_paths = glob.glob(os.path.join(tmp_dir,", "self._sub_step == 'inte_2_pass': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2', self._sample_2_bams_path,", "return arguments def _setup_sub_step(self): if self._sub_step == 'duplicate_input_bam': self._setup_dup_input_bam() elif", "0, 2]]}]) exons = dot_rmats_contents['exons'] if dot_rmats_i == 0: self.assertEqual(exons,", "os.path.join(self._test_dir, 'tmp_miss_prep_bam') tests.util.recreate_dirs([ self._generated_input_dir, self._out_dir, self._prep_1_tmp_dir, self._prep_2_tmp_dir, self._post_tmp_dir, self._dup_input_bam_tmp_dir, self._dup_prep_bam_tmp_dir,", "transcript_1.exons = [(1, 100), (201, 300), (401, 500)] gtf.transcripts =", "[401, 499], 'end_box': [401, 499], 'counts': [1, 0] }] }])", "299]], 'count': 1 }] }]) else: self.assertEqual(multis, [{ quoted_test_gene_id: [{", "201], [300, 400], [499, 499]], 'count': 1 }] }]) self._cp_with_prefix('prep_1_',", "[100, 400], [499, 499]], 'count': 1 }] }]) else: self.assertEqual(multis,", "2) self.assertAlmostEqual(float(inc_level_2_splits[0]), 0) self.assertAlmostEqual(float(inc_level_2_splits[1]), 0) self.assertAlmostEqual(float(se_mats_jc_row['IncLevelDifference']), 1) def _check_results_dup_input_bam(self): self.assertNotEqual(self._rmats_return_code,", "= sample_1_replicate_template.format(2) sample_1_bams = [rep_1_bam, rep_2_bam] rep_1_read_1 = tests.bam.Read() rep_1_read_1.ref_seq_name", "in range(2): dot_rmats_contents, error = output_parser.parse_dot_rmats( dot_rmats_paths[dot_rmats_i]) self.assertFalse(error) self.assertEqual(dot_rmats_contents['bams'], [self._sample_1_bams[dot_rmats_i].path])", "def _check_results_miss_input_bam(self): self._check_no_error_results() def _check_results_miss_prep_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name()", "jc_raw_se_path = os.path.join(self._out_dir, 'JC.raw.input.SE.txt') jc_raw_se_header, jc_raw_se_rows, error = output_parser.parse_jc_raw( jc_raw_se_path)", "self.assertAlmostEqual(float(inc_level_2_splits[1]), 0) self.assertAlmostEqual(float(se_mats_jc_row['IncLevelDifference']), 1) def _check_results_dup_input_bam(self): 
self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name =", "self._sample_1_bams_path, '--b2', self._sample_2_bams_path, '--task', 'inte', ]) elif self._sub_step == 'post':", "1000 # chromosome length rep_2_read_1.template_name = tests.util.template_name_str([1, 2]) rep_2_read_2 =", "self._check_no_error_results() command_stdout_file_name = self._get_stdout_file_name() with open(command_stdout_file_name, 'rt') as out_f_h: out_lines", "}]) else: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[201, 201], [300,", "begin with a timestamp used for alphanumeric sort return sorted(dot_rmats_file_paths)", "0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as err_f_h: err_lines", "= 1000 # chromosome length rep_2_read_1.template_name = tests.util.template_name_str([2, 2]) rep_2_read_2", "'duplicate_input_bam', 'duplicate_prep_bam', 'missing_input_bam', 'missing_prep_bam', ] self._sub_step = None def test(self):", "rep_2_bam.path = sample_1_replicate_template.format(2) sample_1_bams = [rep_1_bam, rep_2_bam] rep_1_read_1 = tests.bam.Read()", "quoted_test_gene_id: [{ 'junction_pairs': [[1, 1], [100, 200], [299, 299]], 'count':", "self._read_length) novel_juncs = dot_rmats_contents['novel_juncs'] self.assertEqual(novel_juncs, [dict()]) exons = dot_rmats_contents['exons'] if", "self.assertAlmostEqual(float(se_mats_jc_row['IncLevelDifference']), 1) def _check_results_dup_input_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with", "tests.test_config.TEST_BASE_DIR self._test_dir = os.path.join(self._test_base_dir, 'prep_post') self._generated_input_dir = os.path.join(self._test_dir, 'generated_input') self._out_dir", "rep_2_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_2_read_1, rep_2_read_2, [[26, 100]], [[1,", "self._setup_sub_step() self._run_test() def _command_output_dir(self): return os.path.join(self._test_dir, 'command_output') def _rmats_arguments(self): arguments", "def _check_results_inte_1_pass(self): self._check_no_error_results() def _check_results_inte_2_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name()", "se_mats_jc_header, se_mats_jc_rows, error = output_parser.parse_mats_jc( se_mats_jc_path) self.assertFalse(error) self._check_se_mats_jc_header(se_mats_jc_header) self.assertEqual(len(se_mats_jc_rows), 1)", "]) self._read_type = 'paired' self._read_length = 50 self._sample_1_bams_path = os.path.join(self._generated_input_dir,", "[ '--gtf', self._gtf_path, '--od', self._out_dir, '-t', self._read_type, '--readLength', str(self._read_length), ]", "tests.bam.BAM() rep_2_bam.path = sample_2_replicate_template.format(2) sample_2_bams = [rep_1_bam, rep_2_bam] rep_1_read_1 =", "def _check_results_dup_input_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt')", "self._setup_dup_prep_bam() elif self._sub_step == 'missing_input_bam': self._setup_miss_input_bam() elif self._sub_step == 'missing_prep_bam':", "self.assertFalse(error) self.assertEqual(dot_rmats_contents['bams'], [self._sample_2_bams[dot_rmats_i].path]) self.assertEqual(dot_rmats_contents['read_length'], self._read_length) novel_juncs = dot_rmats_contents['novel_juncs'] self.assertEqual(novel_juncs, [{quoted_test_gene_id:", "rep_1_bam.path = sample_1_replicate_template.format(1) 
rep_2_bam = tests.bam.BAM() rep_2_bam.path = sample_1_replicate_template.format(2) sample_1_bams", "== 'duplicate_input_bam': self._check_results_dup_input_bam() elif self._sub_step == 'duplicate_prep_bam': self._check_results_dup_prep_bam() elif self._sub_step", "[499, 499]], 'count': 1 }] }]) self._cp_with_prefix('prep_1_', self._prep_1_tmp_dir, self._post_tmp_dir) def", "tests.util.gene_id_str(1) quoted_test_gene_id = tests.util.double_quote(test_gene_id) dot_rmats_paths = self._get_dot_rmats_paths(self._prep_1_tmp_dir) self.assertEqual(len(dot_rmats_paths), 2) for", "'--task', 'post', '--statoff', ]) elif self._sub_step == 'missing_input_bam': arguments.extend([ '--tmp',", "= tests.bam.Read() error = tests.bam.set_read_pair_from_intervals( rep_2_read_1, rep_2_read_2, [[26, 100]], [[201,", "]) elif self._sub_step == 'duplicate_prep_bam': arguments.extend([ '--tmp', self._dup_prep_bam_tmp_dir, '--b1', self._dup_prep_bam_path,", "def _create_gtf(self, gtf_path): gtf = tests.gtf.GTF() gtf.path = gtf_path transcript_1", "self._sub_step == 'inte_2_pass': self._check_results_inte_2_pass() elif self._sub_step == 'post': self._check_results_post() elif", "jc_raw_se_rows, error = output_parser.parse_jc_raw( jc_raw_se_path) self.assertFalse(error) self.assertEqual(len(jc_raw_se_rows), 1) jc_raw_se_row =", "+ [self._sample_1_bams[0]] self._write_bams(bams, self._dup_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._dup_input_bam_tmp_dir) def _setup_dup_prep_bam(self): self._dup_prep_bam_path", "425]], self._read_length) self.assertFalse(error) rep_2_bam.reads = [rep_2_read_1, rep_2_read_2] self._write_bams(sample_2_bams, sample_2_bams_path) return", "self.assertFalse(error) rep_2_bam.reads = [rep_2_read_1, rep_2_read_2] self._write_bams(sample_2_bams, sample_2_bams_path) return sample_2_bams def", "os.path.join(self._test_dir, 'tmp_prep_1') self._prep_2_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_2') self._post_tmp_dir = os.path.join(self._test_dir, 'tmp_post')", "'--statoff', ]) elif self._sub_step == 'missing_input_bam': arguments.extend([ '--tmp', self._miss_input_bam_tmp_dir, '--b1',", "self._sample_2_bams_path = os.path.join(self._generated_input_dir, 'b2.txt') sample_1_bam_replicate_template = os.path.join( self._generated_input_dir, 'sample_1_rep_{}.bam') sample_2_bam_replicate_template", "files') test_gene_id = tests.util.gene_id_str(1) quoted_test_gene_id = tests.util.double_quote(test_gene_id) dot_rmats_paths = self._get_dot_rmats_paths(self._prep_2_tmp_dir)", "import glob import os.path import subprocess import sys import unittest", "tests.util.gene_id_str(1) transcript_1.gene_name = tests.util.gene_name_str(1) transcript_1.transcript_id = tests.util.transcript_id_str(1) transcript_1.exons = [(1,", "'missing_prep_bam': arguments.extend([ '--tmp', self._miss_prep_bam_tmp_dir, '--b1', self._miss_prep_bam_path, '--task', 'post', '--statoff', ])", "= tests.bam.set_read_pair_from_intervals( rep_2_read_1, rep_2_read_2, [[26, 100]], [[201, 300], [401, 425]],", "1], [100, 200], [299, 299]], 'count': 1 }] }]) else:", "elif self._sub_step == 'inte_2_pass': self._check_results_inte_2_pass() elif self._sub_step == 'post': self._check_results_post()", "self._miss_input_bam_path = os.path.join(self._generated_input_dir, 'miss_input.txt') bams = [self._sample_1_bams[0]] self._write_bams(bams, self._miss_input_bam_path) self._cp_with_prefix('prep_1',", "_setup_miss_prep_bam(self): self._miss_prep_bam_path = os.path.join(self._generated_input_dir, 'miss_prep.txt') bams = 
self._sample_1_bams + self._sample_2_bams", "dot_rmats_file_paths = glob.glob(os.path.join(tmp_dir, '*.rmats')) # filenames begin with a timestamp", "= 1000 # chromosome length rep_1_read_1.template_name = tests.util.template_name_str([1, 1]) rep_1_read_2", "}] }]) else: self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box': [1, 99],", "# chromosome rep_2_read_1.ref_seq_len = 1000 # chromosome length rep_2_read_1.template_name =", "dot_rmats_contents['exons'] if dot_rmats_i == 0: self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box':", "err_lines = err_f_h.readlines() for bam in self._sample_2_bams: miss_bam_path = bam.path", "'--b2', self._sample_2_bams_path, '--task', 'inte', ]) elif self._sub_step == 'inte_1_pass': arguments.extend([", "chromosome length rep_2_read_1.template_name = tests.util.template_name_str([2, 2]) rep_2_read_2 = tests.bam.Read() error", "499], 'counts': [1, 0] }] }]) else: self.assertEqual(exons, [{ quoted_test_gene_id:", "rep_1_read_1 = tests.bam.Read() rep_1_read_1.ref_seq_name = '1' # chromosome rep_1_read_1.ref_seq_len =", "a timestamp used for alphanumeric sort return sorted(dot_rmats_file_paths) def _check_results_prep_1(self):", "from_gtf_se_rows[0] self.assertEqual(from_gtf_se_row['GeneID'], tests.util.double_quote(tests.util.gene_id_str(1))) self.assertEqual(from_gtf_se_row['exonStart_0base'], '200') self.assertEqual(from_gtf_se_row['exonEnd'], '300') jc_raw_se_path = os.path.join(self._out_dir,", "_check_results_inte_1_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as", "1000 # chromosome length rep_1_read_1.template_name = tests.util.template_name_str([1, 1]) rep_1_read_2 =", "self._write_bams(sample_2_bams, sample_2_bams_path) return sample_2_bams def _cp_with_prefix(self, prefix, source_dir, dest_dir): source_paths", "self._miss_input_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_input_bam') self._miss_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_prep_bam') tests.util.recreate_dirs([ self._generated_input_dir,", "'missing_prep_bam', ] self._sub_step = None def test(self): for sub_step in", "novel_juncs = dot_rmats_contents['novel_juncs'] self.assertEqual(novel_juncs, [dict()]) exons = dot_rmats_contents['exons'] if dot_rmats_i", "= tests.bam.set_read_pair_from_intervals(rep_1_read_1, rep_1_read_2, [[76, 100], [201, 300]], [[401, 475]], self._read_length)", "rep_1_read_1.ref_seq_len = 1000 # chromosome length rep_1_read_1.template_name = tests.util.template_name_str([1, 1])", "= dot_rmats_contents['multis'] if dot_rmats_i == 0: self.assertEqual(multis, [{ quoted_test_gene_id: [{", "[ 'prep_1', 'inte_1_fail', 'inte_1_pass', 'prep_2', 'inte_2_fail', 'inte_2_pass', 'post', 'duplicate_input_bam', 'duplicate_prep_bam',", "self._miss_prep_bam_tmp_dir, '--b1', self._miss_prep_bam_path, '--task', 'post', '--statoff', ]) return arguments def", "self._sample_2_bams_path, '--task', 'post', ]) elif self._sub_step == 'duplicate_input_bam': arguments.extend([ '--tmp',", "bams = self._sample_1_bams + [self._sample_1_bams[0]] self._write_bams(bams, self._dup_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._dup_input_bam_tmp_dir)", "self.assertEqual(jc_raw_se_row['IJC_SAMPLE_2'], '0,0') self.assertEqual(jc_raw_se_row['SJC_SAMPLE_2'], '1,1') se_mats_jc_path = os.path.join(self._out_dir, 'SE.MATS.JC.txt') se_mats_jc_header, se_mats_jc_rows,", "'miss_input.txt') bams = [self._sample_1_bams[0]] 
self._write_bams(bams, self._miss_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_input_bam_tmp_dir) def", "'--statoff', ]) elif self._sub_step == 'prep_2': arguments.extend([ '--tmp', self._prep_2_tmp_dir, '--b1',", "def _setup_sub_step(self): if self._sub_step == 'duplicate_input_bam': self._setup_dup_input_bam() elif self._sub_step ==", "def _command_output_dir(self): return os.path.join(self._test_dir, 'command_output') def _rmats_arguments(self): arguments = [", "sample_1_bams_path, sample_1_replicate_template): rep_1_bam = tests.bam.BAM() rep_1_bam.path = sample_1_replicate_template.format(1) rep_2_bam =", "'missing_input_bam': self._check_results_miss_input_bam() elif self._sub_step == 'missing_prep_bam': self._check_results_miss_prep_bam() else: self.fail('unexpected sub_step:", "self._sample_1_bams + [self._sample_1_bams[0]] self._write_bams(bams, self._dup_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._dup_input_bam_tmp_dir) def _setup_dup_prep_bam(self):", "arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--task', 'inte', '--statoff', ]) elif", "'--task', 'prep', ]) elif self._sub_step == 'inte_2_fail': arguments.extend([ '--tmp', self._post_tmp_dir,", "sample_2_bam_replicate_template = os.path.join( self._generated_input_dir, 'sample_2_rep_{}.bam') self._sample_1_bams = self._create_sample_1_bams( self._sample_1_bams_path, sample_1_bam_replicate_template)", "self._sub_step == 'missing_prep_bam': arguments.extend([ '--tmp', self._miss_prep_bam_tmp_dir, '--b1', self._miss_prep_bam_path, '--task', 'post',", "[[26, 100]], [[201, 300], [401, 425]], self._read_length) self.assertFalse(error) rep_2_bam.reads =", "with no associated prep output') def _check_results_inte_1_pass(self): self._check_no_error_results() def _check_results_inte_2_fail(self):", "self._post_tmp_dir = os.path.join(self._test_dir, 'tmp_post') self._dup_input_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_dup_input_bam') self._dup_prep_bam_tmp_dir =", "= os.path.join(self._out_dir, 'JC.raw.input.SE.txt') jc_raw_se_header, jc_raw_se_rows, error = output_parser.parse_jc_raw( jc_raw_se_path) self.assertFalse(error)", "self._check_results_miss_input_bam() elif self._sub_step == 'missing_prep_bam': self._check_results_miss_prep_bam() else: self.fail('unexpected sub_step: {}'.format(self._sub_step))", "'command_output') def _rmats_arguments(self): arguments = [ '--gtf', self._gtf_path, '--od', self._out_dir,", "'--tmp', self._miss_input_bam_tmp_dir, '--b1', self._miss_input_bam_path, '--task', 'post', '--statoff', ]) elif self._sub_step", "os.path.join(self._generated_input_dir, 'miss_input.txt') bams = [self._sample_1_bams[0]] self._write_bams(bams, self._miss_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_input_bam_tmp_dir)", "gtf def _create_sample_1_bams(self, sample_1_bams_path, sample_1_replicate_template): rep_1_bam = tests.bam.BAM() rep_1_bam.path =", "as err_f_h: err_lines = err_f_h.readlines() tests.util.assert_some_line_has( self, err_lines, 'input bam", "tests.util.recreate_dirs([ self._generated_input_dir, self._out_dir, self._prep_1_tmp_dir, self._prep_2_tmp_dir, self._post_tmp_dir, self._dup_input_bam_tmp_dir, self._dup_prep_bam_tmp_dir, self._miss_input_bam_tmp_dir, self._miss_prep_bam_tmp_dir,", "elif self._sub_step == 'inte_2_fail': self._check_results_inte_2_fail() elif self._sub_step == 'inte_2_pass': self._check_results_inte_2_pass()", "300), (401, 500)] gtf.transcripts = 
# rMATS prep/post integration test (fragmentary excerpt recovered from
# overlapping snippets; spans that could not be recovered are marked "# ...").
import glob
import os.path
import sys
import unittest

import tests.bam
import tests.base_test
import tests.gtf
import tests.output_parser as output_parser
import tests.test_config
import tests.util


class Test(tests.base_test.BaseTest):
    def setUp(self):
        super().setUp()
        self._test_base_dir = tests.test_config.TEST_BASE_DIR  # constant name inferred
        self._test_dir = os.path.join(self._test_base_dir, 'prep_post')
        self._generated_input_dir = os.path.join(self._test_dir, 'generated_input')
        self._out_dir = os.path.join(self._test_dir, 'out')
        self._prep_1_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_1')
        self._prep_2_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_2')
        self._post_tmp_dir = os.path.join(self._test_dir, 'tmp_post')
        self._dup_input_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_dup_input_bam')
        self._dup_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_dup_prep_bam')
        self._miss_input_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_input_bam')
        self._miss_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_prep_bam')
        tests.util.recreate_dirs([  # helper name inferred
            self._generated_input_dir, self._out_dir, self._prep_1_tmp_dir,
            self._prep_2_tmp_dir, self._post_tmp_dir,
            self._dup_input_bam_tmp_dir, self._dup_prep_bam_tmp_dir,
            self._miss_input_bam_tmp_dir, self._miss_prep_bam_tmp_dir,
            self._command_output_dir()
        ])

        self._read_type = 'paired'
        self._read_length = 50

        self._sample_1_bams_path = os.path.join(self._generated_input_dir, 'b1.txt')  # file name inferred
        self._sample_2_bams_path = os.path.join(self._generated_input_dir, 'b2.txt')  # file name inferred
        sample_1_bam_replicate_template = os.path.join(
            self._generated_input_dir, 'sample_1_rep_{}.bam')  # template inferred
        sample_2_bam_replicate_template = os.path.join(
            self._generated_input_dir, 'sample_2_rep_{}.bam')  # template inferred
        self._sample_1_bams = self._create_sample_1_bams(
            self._sample_1_bams_path, sample_1_bam_replicate_template)
        self._sample_2_bams = self._create_sample_2_bams(
            self._sample_2_bams_path, sample_2_bam_replicate_template)

        self._gtf_path = os.path.join(self._generated_input_dir, 'test.gtf')
        self._gtf = self._create_gtf(self._gtf_path)
        self._sub_steps = [
            'prep_1', 'inte_1_fail', 'inte_1_pass', 'prep_2', 'inte_2_fail',
            'inte_2_pass', 'post', 'duplicate_input_bam', 'duplicate_prep_bam',
            'missing_input_bam', 'missing_prep_bam',
        ]  # order inferred from the per-sub-step code below
        self._sub_step = None

    def test(self):
        for sub_step in self._sub_steps:
            self._sub_step = sub_step
            # ... (per-sub-step setup, rmats invocation, and result checks)

    def _create_gtf(self, gtf_path):
        gtf = tests.gtf.GTF()
        gtf.path = gtf_path
        transcript_1 = tests.gtf.Transcript()
        transcript_1.chromosome = '1'
        transcript_1.strand = '+'  # strand value not recoverable; '+' assumed
        transcript_1.gene_id = tests.util.gene_id_str(1)  # line inferred from checks
        transcript_1.transcript_id = tests.util.transcript_id_str(1)
        transcript_1.exons = [(1, 100), (201, 300), (401, 500)]
        gtf.transcripts = [transcript_1]
        error = gtf.write()
        self.assertFalse(error)
        return gtf

    def _create_sample_1_bams(self, sample_1_bams_path, sample_1_replicate_template):
        rep_1_bam = tests.bam.BAM()
        rep_1_bam.path = sample_1_replicate_template.format(1)
        rep_2_bam = tests.bam.BAM()
        rep_2_bam.path = sample_1_replicate_template.format(2)
        sample_1_bams = [rep_1_bam, rep_2_bam]

        rep_1_read_1 = tests.bam.Read()
        rep_1_read_1.ref_name = '1'  # chromosome
        rep_1_read_1.ref_seq_len = 1000  # chromosome length
        rep_1_read_1.template_name = tests.util.template_name_str([2, 1])
        rep_1_read_2 = tests.bam.Read()
        error = tests.bam.set_read_pair_from_intervals(
            rep_1_read_1, rep_1_read_2, [[76, 100], [401, 500]], [[401, 475]],
            self._read_length)
        self.assertFalse(error)
        rep_1_bam.reads = [rep_1_read_1, rep_1_read_2]

        rep_2_read_1 = tests.bam.Read()
        # ... (rep_2 read attributes parallel rep_1's)
        rep_2_read_1.template_name = tests.util.template_name_str([1, 2])
        rep_2_read_2 = tests.bam.Read()
        error = tests.bam.set_read_pair_from_intervals(
            rep_2_read_1, rep_2_read_2, [[26, 100]], [[1, 100], [401, 425]],
            self._read_length)
        self.assertFalse(error)
        rep_2_bam.reads = [rep_2_read_1, rep_2_read_2]

        self._write_bams(sample_1_bams, sample_1_bams_path)
        return sample_1_bams

    def _create_sample_2_bams(self, sample_2_bams_path, sample_2_replicate_template):
        rep_1_bam = tests.bam.BAM()
        rep_1_bam.path = sample_2_replicate_template.format(1)
        rep_2_bam = tests.bam.BAM()
        rep_2_bam.path = sample_2_replicate_template.format(2)
        sample_2_bams = [rep_1_bam, rep_2_bam]

        # ... (read construction parallels _create_sample_1_bams; the fragments
        # show a rep_1 pair built from intervals ending [..., 300]], [[401, 475]])
        rep_1_bam.reads = [rep_1_read_1, rep_1_read_2]
        rep_2_read_1 = tests.bam.Read()
        # ...
        rep_2_bam.reads = [rep_2_read_1, rep_2_read_2]

        self._write_bams(sample_2_bams, sample_2_bams_path)
        return sample_2_bams

    def _cp_with_prefix(self, prefix, source_dir, dest_dir):
        source_paths = self._get_dot_rmats_paths(source_dir)
        command = [
            sys.executable, tests.test_config.CP_WITH_PREFIX, prefix, dest_dir
        ]
        command.extend(source_paths)
        # ... (run the copy command and assert it succeeded)

    def _get_dot_rmats_paths(self, tmp_dir):
        dot_rmats_file_paths = glob.glob(os.path.join(tmp_dir, '*.rmats'))
        # glob does not guarantee order; sort for deterministic comparisons
        return sorted(dot_rmats_file_paths)

    def _setup_sub_step(self):  # method name inferred
        if self._sub_step == 'duplicate_input_bam':
            self._setup_dup_input_bam()
        elif self._sub_step == 'duplicate_prep_bam':
            self._setup_dup_prep_bam()
        elif self._sub_step == 'missing_input_bam':
            self._setup_miss_input_bam()
        elif self._sub_step == 'missing_prep_bam':
            self._setup_miss_prep_bam()

    def _setup_dup_input_bam(self):
        self._dup_input_bam_path = os.path.join(self._generated_input_dir,
                                                'dup_input.txt')
        bams = self._sample_1_bams + [self._sample_1_bams[0]]
        self._write_bams(bams, self._dup_input_bam_path)
        self._cp_with_prefix('prep_1', self._prep_1_tmp_dir,
                             self._dup_input_bam_tmp_dir)

    def _setup_dup_prep_bam(self):
        self._dup_prep_bam_path = os.path.join(self._generated_input_dir,
                                               'dup_prep.txt')
        # ... (write the bam list for this sub-step)
        self._cp_with_prefix('prep_1', self._prep_1_tmp_dir,
                             self._dup_prep_bam_tmp_dir)
        self._cp_with_prefix('prep_1_again', self._prep_1_tmp_dir,
                             self._dup_prep_bam_tmp_dir)

    def _setup_miss_input_bam(self):
        self._miss_input_bam_path = os.path.join(self._generated_input_dir,
                                                 'miss_input.txt')  # name inferred
        # ...

    def _rmats_arguments(self):  # method name inferred
        arguments = [
            # ... (shared arguments: gtf, output dir, read type and length)
        ]
        if self._sub_step == 'prep_1':
            arguments.extend([
                '--tmp', self._prep_1_tmp_dir,
                '--b1', self._sample_1_bams_path,
                '--task', 'prep',
                '--statoff',
            ])
        elif self._sub_step == 'inte_1_fail':
            arguments.extend([
                '--tmp', self._post_tmp_dir,
                '--b1', self._sample_1_bams_path,
                '--b2', self._sample_2_bams_path,
                '--task', 'inte',
                '--statoff',
            ])
        elif self._sub_step == 'inte_1_pass':
            arguments.extend([
                '--tmp', self._post_tmp_dir,
                '--b1', self._sample_1_bams_path,
                '--task', 'inte',
                '--statoff',
            ])
        elif self._sub_step == 'inte_2_fail':
            arguments.extend([
                '--tmp', self._post_tmp_dir,
                '--b1', self._sample_2_bams_path,
                '--task', 'inte',
                '--statoff',
            ])
        elif self._sub_step == 'duplicate_input_bam':
            arguments.extend([
                '--tmp', self._dup_input_bam_tmp_dir,
                '--b1', self._dup_input_bam_path,
                '--task', 'post',
                '--statoff',
            ])
        elif self._sub_step == 'duplicate_prep_bam':
            arguments.extend([
                '--tmp', self._dup_prep_bam_tmp_dir,
                '--b1', self._dup_prep_bam_path,  # tail of this branch inferred
                '--task', 'post',
                '--statoff',
            ])
        # ... (prep_2, inte_2_pass, post, missing bam branches)
        return arguments

    def _check_results(self):
        if self._sub_step == 'prep_1':
            self._check_results_prep_1()
        elif self._sub_step == 'inte_1_fail':
            self._check_results_inte_1_fail()
        elif self._sub_step == 'inte_1_pass':
            self._check_results_inte_1_pass()
        elif self._sub_step == 'prep_2':
            self._check_results_prep_2()
        elif self._sub_step == 'inte_2_fail':
            self._check_results_inte_2_fail()
        elif self._sub_step == 'inte_2_pass':
            self._check_results_inte_2_pass()
        # ... (post, duplicate and missing bam sub-steps)
        else:
            self.fail('unexpected sub_step: {}'.format(self._sub_step))

    def _check_results_prep_1(self):
        self._check_no_error_results()

        command_stdout_file_name = self._get_stdout_file_name()
        with open(command_stdout_file_name, 'rt') as out_f_h:
            out_lines = out_f_h.readlines()
        # ... (stdout assertions)

        dot_rmats_paths = self._get_dot_rmats_paths(self._prep_1_tmp_dir)
        self.assertEqual(len(dot_rmats_paths), 2)
        for dot_rmats_i in range(2):
            dot_rmats_contents, error = output_parser.parse_dot_rmats(
                dot_rmats_paths[dot_rmats_i])
            self.assertFalse(error)
            self.assertEqual(dot_rmats_contents['bams'],
                             [self._sample_1_bams[dot_rmats_i].path])
            self.assertEqual(dot_rmats_contents['read_length'], self._read_length)

            novel_juncs = dot_rmats_contents['novel_juncs']
            self.assertEqual(novel_juncs, [dict()])
            exons = dot_rmats_contents['exons']
            # ... (per-replicate exon and junction expectations)

    def _check_results_prep_2(self):
        self._check_no_error_results()

        test_gene_id = tests.util.gene_id_str(1)
        quoted_test_gene_id = tests.util.double_quote(test_gene_id)
        dot_rmats_paths = self._get_dot_rmats_paths(self._prep_2_tmp_dir)
        self.assertEqual(len(dot_rmats_paths), 2)
        for dot_rmats_i in range(2):
            dot_rmats_contents, error = output_parser.parse_dot_rmats(
                dot_rmats_paths[dot_rmats_i])
            self.assertFalse(error)
            self.assertEqual(dot_rmats_contents['bams'],
                             [self._sample_2_bams[dot_rmats_i].path])
            self.assertEqual(dot_rmats_contents['read_length'], self._read_length)

            novel_juncs = dot_rmats_contents['novel_juncs']
            self.assertEqual(novel_juncs, [{quoted_test_gene_id: [[0, 0, 2]]}])

            # Which box set belongs to which replicate is inferred.
            exons = dot_rmats_contents['exons']
            if dot_rmats_i == 0:
                self.assertEqual(exons, [{
                    quoted_test_gene_id: [{
                        'start_box': [401, 499],
                        'end_box': [401, 499],
                        'counts': [1, 0]
                    }]
                }])
            else:
                self.assertEqual(exons, [{
                    quoted_test_gene_id: [{
                        'start_box': [1, 99],
                        'end_box': [1, 99],
                        'counts': [1, 0]
                    }]
                }])

            multis = dot_rmats_contents['multis']
            if dot_rmats_i == 0:
                self.assertEqual(multis, [{
                    quoted_test_gene_id: [{
                        'junction_pairs': [[201, 201], [300, 400], [499, 499]],
                        'count': 1
                    }]
                }])
            else:
                self.assertEqual(multis, [{
                    quoted_test_gene_id: [{
                        'junction_pairs': [[1, 1], [100, 400], [499, 499]],
                        'count': 1
                    }]
                }])

        self._cp_with_prefix('prep_2_', self._prep_2_tmp_dir, self._post_tmp_dir)

    def _check_results_inte_1_fail(self):
        self.assertNotEqual(self._rmats_return_code, 0)
        # ...

    def _check_results_inte_1_pass(self):
        self._check_no_error_results()
        # ...

    def _check_results_inte_2_fail(self):
        self.assertNotEqual(self._rmats_return_code, 0)  # head inferred
        command_stderr_file_name = self._get_stderr_file_name()
        with open(command_stderr_file_name, 'rt') as err_f_h:
            err_lines = err_f_h.readlines()
        tests.util.assert_some_line_has(
            self, err_lines,
            'bam files not in input but associated with prep output')

    def _check_results_inte_2_pass(self):
        self._check_no_error_results()

    def _check_results_post(self):
        self._check_no_error_results()

        from_gtf_se_path = os.path.join(self._out_dir, 'fromGTF.SE.txt')
        from_gtf_se_header, from_gtf_se_rows, error = output_parser.parse_from_gtf(
            from_gtf_se_path)  # parser name inferred
        self.assertFalse(error)
        self.assertEqual(len(from_gtf_se_rows), 1)
        from_gtf_se_row = from_gtf_se_rows[0]
        self.assertEqual(from_gtf_se_row['GeneID'],
                         tests.util.double_quote(tests.util.gene_id_str(1)))
        self.assertEqual(from_gtf_se_row['exonStart_0base'], '200')
        self.assertEqual(from_gtf_se_row['exonEnd'], '300')

        jc_raw_se_path = os.path.join(self._out_dir, 'JC.raw.input.SE.txt')
        jc_raw_se_header, jc_raw_se_rows, error = output_parser.parse_jc_raw(
            jc_raw_se_path)  # parser name inferred
        # ...

        se_mats_jc_path = os.path.join(self._out_dir, 'SE.MATS.JC.txt')
        se_mats_jc_header, se_mats_jc_rows, error = output_parser.parse_mats_jc(
            se_mats_jc_path)
        self.assertFalse(error)
        self._check_se_mats_jc_header(se_mats_jc_header)
        # ...
        se_mats_jc_row = se_mats_jc_rows[0]
        pvalue = float(se_mats_jc_row['PValue'])
        tests.util.assert_within_bounds(self, pvalue, 0, 1)
        # ...
        inc_level_1_splits = se_mats_jc_row['IncLevel1'].split(',')
        self.assertEqual(len(inc_level_1_splits), 2)
        self.assertAlmostEqual(float(inc_level_1_splits[0]), 1)
        self.assertAlmostEqual(float(inc_level_1_splits[1]), 1)
        inc_level_2_splits = se_mats_jc_row['IncLevel2'].split(',')
        # ...

    def _check_results_dup_input_bam(self):  # method name inferred
        self.assertNotEqual(self._rmats_return_code, 0)
        command_stderr_file_name = self._get_stderr_file_name()
        with open(command_stderr_file_name, 'rt') as err_f_h:
            err_lines = err_f_h.readlines()
        dup_bam_path = self._sample_1_bams[0].path
        expected_error = '{} given 2 times'.format(dup_bam_path)
        tests.util.assert_some_line_has(self, err_lines, expected_error)

    def _check_results_dup_prep_bam(self):
        self.assertNotEqual(self._rmats_return_code, 0)
        command_stderr_file_name = self._get_stderr_file_name()
        with open(command_stderr_file_name, 'rt') as err_f_h:
            err_lines = err_f_h.readlines()
        for bam in self._sample_1_bams:
            dup_bam_path = bam.path
            expected_error = '{} found 2 times in .rmats'.format(dup_bam_path)
            tests.util.assert_some_line_has(self, err_lines, expected_error)

    def _check_results_miss_input_bam(self):
        self._check_no_error_results()

    def _check_results_miss_prep_bam(self):
        self.assertNotEqual(self._rmats_return_code, 0)
        # ... (read stderr and the missing bam path)
        expected_error = '{} not found in .rmats'.format(miss_bam_path)
        tests.util.assert_some_line_has(self, err_lines, expected_error)


if __name__ == '__main__':
    unittest.main()
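The test above drives rMATS-turbo's split prep/post workflow end to end. As a rough sketch of the invocation pattern the harness builds (the script location and input file names here are hypothetical; '--b1', '--tmp', '--task', and '--statoff' appear in the fragments, and the remaining flags are standard rMATS-turbo options):

import subprocess
import sys

def run_rmats(task, tmp_dir):
    # One rmats.py invocation per sub-step: 'prep' writes per-BAM *.rmats
    # files into tmp_dir; 'post' reads them back and writes the output tables.
    command = [
        sys.executable, 'rmats.py',   # hypothetical script path
        '--b1', 'b1.txt',             # hypothetical BAM list file
        '--gtf', 'test.gtf',
        '--od', 'out',
        '--tmp', tmp_dir,
        '-t', 'paired',
        '--readLength', '50',
        '--task', task,
        '--statoff',
    ]
    subprocess.run(command, check=True)

run_rmats('prep', 'tmp_prep_1')  # step 1: produce .rmats files
run_rmats('post', 'tmp_post')    # step 2: after copying the .rmats files over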
[ "tuples) into pharaoh text format. >>> alignment = [(0, 0),", "7-5' >>> pharaohtext2tuples(pharaoh_text) Alignment([(0, 0), (2, 1), (7, 5), (9,", ":rtype: Alignment :return: An Alignment object that contains a list", "# Natural Language Toolkit: Aligner Utilities # # Copyright (C)", "integer tuples :rtype: str :return: the word alignment outputs in", "Utilities # # Copyright (C) 2001-2015 NLTK Project # Author:", "import Alignment def pharaohtext2tuples(pharaoh_text): \"\"\" Converts pharaoh text format into", "# Author: <NAME> # URL: <http://www.nltk.org/> # For license information,", "# Copyright (C) 2001-2015 NLTK Project # Author: <NAME> #", "Alignment object (a list of tuples). >>> pharaoh_text = '0-0", "an Alignment object (a list of tuples) into pharaoh text", "of integer tuples \"\"\" # Converts integers to strings for", ":return: the word alignment outputs in the pharaoh output format", "4), (7, 5)] >>> alignment2pharaohtext(alignment) '0-0 2-1 9-2 21-3 10-4", "text format into an Alignment object (a list of tuples).", "= [tuple(map(int,a.split('-'))) for a in pharaoh_text.split()] return Alignment(list_of_tuples) def alignment2pharaohtext(alignment):", "a word alignment point. list_of_tuples = [tuple(map(int,a.split('-'))) for a in", "pharaohtext2tuples(pharaoh_text): \"\"\" Converts pharaoh text format into an Alignment object", "For license information, see LICENSE.TXT from nltk.align.api import Alignment def", "Converts an Alignment object (a list of tuples) into pharaoh", "a list of integer tuples \"\"\" # Converts integers to", "(9, 2), (10, 4), (21, 3)]) :type pharaoh_text: str :param", "(21, 3), (10, 4), (7, 5)] >>> alignment2pharaohtext(alignment) '0-0 2-1", ":type alignment: Alignment :param alignment: An Alignment object that contains", "from nltk.align.api import Alignment def pharaohtext2tuples(pharaoh_text): \"\"\" Converts pharaoh text", "strings for a word alignment point. list_of_tuples = [tuple(map(int,a.split('-'))) for", "see LICENSE.TXT from nltk.align.api import Alignment def pharaohtext2tuples(pharaoh_text): \"\"\" Converts", "Language Toolkit: Aligner Utilities # # Copyright (C) 2001-2015 NLTK", "4), (21, 3)]) :type pharaoh_text: str :param pharaoh_text: the word", "Alignment object that contains a list of integer tuples \"\"\"", ":param alignment: An Alignment object that contains a list of", "21-3 10-4 7-5' >>> pharaohtext2tuples(pharaoh_text) Alignment([(0, 0), (2, 1), (7,", "Alignment :return: An Alignment object that contains a list of", "contains a list of integer tuples \"\"\" # Converts integers", "information, see LICENSE.TXT from nltk.align.api import Alignment def pharaohtext2tuples(pharaoh_text): \"\"\"", "Alignment([(0, 0), (2, 1), (7, 5), (9, 2), (10, 4),", "[tuple(map(int,a.split('-'))) for a in pharaoh_text.split()] return Alignment(list_of_tuples) def alignment2pharaohtext(alignment): \"\"\"", "pharaoh_text: str :param pharaoh_text: the word alignment outputs in the", "3), (10, 4), (7, 5)] >>> alignment2pharaohtext(alignment) '0-0 2-1 9-2", "'0-0 2-1 9-2 21-3 10-4 7-5' :type alignment: Alignment :param", "pharaoh_text: the word alignment outputs in the pharaoh output format", "'.join(str(i) + \"-\" + str(j) for i,j in alignment) return", "point. 
list_of_tuples = [tuple(map(int,a.split('-'))) for a in pharaoh_text.split()] return Alignment(list_of_tuples)", "pharaoh output format \"\"\" pharaoh_text = ' '.join(str(i) + \"-\"", "nltk.align.api import Alignment def pharaohtext2tuples(pharaoh_text): \"\"\" Converts pharaoh text format", "pharaoh_text.split()] return Alignment(list_of_tuples) def alignment2pharaohtext(alignment): \"\"\" Converts an Alignment object", "alignment = [(0, 0), (2, 1), (9, 2), (21, 3),", "object (a list of tuples). >>> pharaoh_text = '0-0 2-1", "(10, 4), (21, 3)]) :type pharaoh_text: str :param pharaoh_text: the", "Alignment object (a list of tuples) into pharaoh text format.", "format \"\"\" pharaoh_text = ' '.join(str(i) + \"-\" + str(j)", "(7, 5), (9, 2), (10, 4), (21, 3)]) :type pharaoh_text:", "0), (2, 1), (7, 5), (9, 2), (10, 4), (21,", "URL: <http://www.nltk.org/> # For license information, see LICENSE.TXT from nltk.align.api", ":param pharaoh_text: the word alignment outputs in the pharaoh output", "list of tuples) into pharaoh text format. >>> alignment =", "alignment2pharaohtext(alignment) '0-0 2-1 9-2 21-3 10-4 7-5' :type alignment: Alignment", "output format \"\"\" pharaoh_text = ' '.join(str(i) + \"-\" +", "pharaoh text format. >>> alignment = [(0, 0), (2, 1),", ":rtype: str :return: the word alignment outputs in the pharaoh", "(C) 2001-2015 NLTK Project # Author: <NAME> # URL: <http://www.nltk.org/>", "in the pharaoh output format :rtype: Alignment :return: An Alignment", "\"\"\" Converts pharaoh text format into an Alignment object (a", "Converts pharaoh text format into an Alignment object (a list", "an Alignment object (a list of tuples). >>> pharaoh_text =", "(2, 1), (7, 5), (9, 2), (10, 4), (21, 3)])", "outputs in the pharaoh output format :rtype: Alignment :return: An", "integers to strings for a word alignment point. list_of_tuples =", "pharaoh_text = '0-0 2-1 9-2 21-3 10-4 7-5' >>> pharaohtext2tuples(pharaoh_text)", "Natural Language Toolkit: Aligner Utilities # # Copyright (C) 2001-2015", "7-5' :type alignment: Alignment :param alignment: An Alignment object that", "1), (9, 2), (21, 3), (10, 4), (7, 5)] >>>", "= [(0, 0), (2, 1), (9, 2), (21, 3), (10,", "def alignment2pharaohtext(alignment): \"\"\" Converts an Alignment object (a list of", "contains a list of integer tuples :rtype: str :return: the", "Alignment :param alignment: An Alignment object that contains a list", "list of tuples). >>> pharaoh_text = '0-0 2-1 9-2 21-3", "in the pharaoh output format \"\"\" pharaoh_text = ' '.join(str(i)", "2001-2015 NLTK Project # Author: <NAME> # URL: <http://www.nltk.org/> #", "Project # Author: <NAME> # URL: <http://www.nltk.org/> # For license", "into an Alignment object (a list of tuples). >>> pharaoh_text", "tuples \"\"\" # Converts integers to strings for a word", "'0-0 2-1 9-2 21-3 10-4 7-5' >>> pharaohtext2tuples(pharaoh_text) Alignment([(0, 0),", "2-1 9-2 21-3 10-4 7-5' :type alignment: Alignment :param alignment:", "<http://www.nltk.org/> # For license information, see LICENSE.TXT from nltk.align.api import", "pharaoh output format :rtype: Alignment :return: An Alignment object that", "5), (9, 2), (10, 4), (21, 3)]) :type pharaoh_text: str", "1), (7, 5), (9, 2), (10, 4), (21, 3)]) :type", ":type pharaoh_text: str :param pharaoh_text: the word alignment outputs in", ">>> alignment = [(0, 0), (2, 1), (9, 2), (21,", "2), (21, 3), (10, 4), (7, 5)] >>> alignment2pharaohtext(alignment) '0-0", "into pharaoh text format. 
>>> alignment = [(0, 0), (2,", "+ \"-\" + str(j) for i,j in alignment) return pharaoh_text", ">>> alignment2pharaohtext(alignment) '0-0 2-1 9-2 21-3 10-4 7-5' :type alignment:", "str :param pharaoh_text: the word alignment outputs in the pharaoh", "Aligner Utilities # # Copyright (C) 2001-2015 NLTK Project #", "pharaoh text format into an Alignment object (a list of", "alignment: Alignment :param alignment: An Alignment object that contains a", "of integer tuples :rtype: str :return: the word alignment outputs", "alignment outputs in the pharaoh output format :rtype: Alignment :return:", "# Converts integers to strings for a word alignment point.", "Converts integers to strings for a word alignment point. list_of_tuples", "license information, see LICENSE.TXT from nltk.align.api import Alignment def pharaohtext2tuples(pharaoh_text):", "Alignment def pharaohtext2tuples(pharaoh_text): \"\"\" Converts pharaoh text format into an", "for a in pharaoh_text.split()] return Alignment(list_of_tuples) def alignment2pharaohtext(alignment): \"\"\" Converts", "object (a list of tuples) into pharaoh text format. >>>", "pharaoh_text = ' '.join(str(i) + \"-\" + str(j) for i,j", "# URL: <http://www.nltk.org/> # For license information, see LICENSE.TXT from", "= '0-0 2-1 9-2 21-3 10-4 7-5' >>> pharaohtext2tuples(pharaoh_text) Alignment([(0,", "to strings for a word alignment point. list_of_tuples = [tuple(map(int,a.split('-')))", "# # Copyright (C) 2001-2015 NLTK Project # Author: <NAME>", "9-2 21-3 10-4 7-5' :type alignment: Alignment :param alignment: An", "the pharaoh output format \"\"\" pharaoh_text = ' '.join(str(i) +", "2), (10, 4), (21, 3)]) :type pharaoh_text: str :param pharaoh_text:", "format. >>> alignment = [(0, 0), (2, 1), (9, 2),", "alignment2pharaohtext(alignment): \"\"\" Converts an Alignment object (a list of tuples)", "5)] >>> alignment2pharaohtext(alignment) '0-0 2-1 9-2 21-3 10-4 7-5' :type", "format into an Alignment object (a list of tuples). >>>", "10-4 7-5' :type alignment: Alignment :param alignment: An Alignment object", ">>> pharaohtext2tuples(pharaoh_text) Alignment([(0, 0), (2, 1), (7, 5), (9, 2),", "Author: <NAME> # URL: <http://www.nltk.org/> # For license information, see", "<NAME> # URL: <http://www.nltk.org/> # For license information, see LICENSE.TXT", "2-1 9-2 21-3 10-4 7-5' >>> pharaohtext2tuples(pharaoh_text) Alignment([(0, 0), (2,", "of tuples) into pharaoh text format. >>> alignment = [(0,", "(21, 3)]) :type pharaoh_text: str :param pharaoh_text: the word alignment", "9-2 21-3 10-4 7-5' >>> pharaohtext2tuples(pharaoh_text) Alignment([(0, 0), (2, 1),", "for a word alignment point. list_of_tuples = [tuple(map(int,a.split('-'))) for a", "of tuples). >>> pharaoh_text = '0-0 2-1 9-2 21-3 10-4", "that contains a list of integer tuples :rtype: str :return:", "format :rtype: Alignment :return: An Alignment object that contains a", "alignment point. 
list_of_tuples = [tuple(map(int,a.split('-'))) for a in pharaoh_text.split()] return", "a in pharaoh_text.split()] return Alignment(list_of_tuples) def alignment2pharaohtext(alignment): \"\"\" Converts an", "' '.join(str(i) + \"-\" + str(j) for i,j in alignment)", "word alignment outputs in the pharaoh output format \"\"\" pharaoh_text", "Alignment(list_of_tuples) def alignment2pharaohtext(alignment): \"\"\" Converts an Alignment object (a list", "a list of integer tuples :rtype: str :return: the word", "list of integer tuples :rtype: str :return: the word alignment", "(7, 5)] >>> alignment2pharaohtext(alignment) '0-0 2-1 9-2 21-3 10-4 7-5'", ":return: An Alignment object that contains a list of integer", "21-3 10-4 7-5' :type alignment: Alignment :param alignment: An Alignment", "tuples :rtype: str :return: the word alignment outputs in the", "(a list of tuples). >>> pharaoh_text = '0-0 2-1 9-2", "the word alignment outputs in the pharaoh output format :rtype:", "outputs in the pharaoh output format \"\"\" pharaoh_text = '", "Copyright (C) 2001-2015 NLTK Project # Author: <NAME> # URL:", "(10, 4), (7, 5)] >>> alignment2pharaohtext(alignment) '0-0 2-1 9-2 21-3", "Alignment object that contains a list of integer tuples :rtype:", "LICENSE.TXT from nltk.align.api import Alignment def pharaohtext2tuples(pharaoh_text): \"\"\" Converts pharaoh", "list_of_tuples = [tuple(map(int,a.split('-'))) for a in pharaoh_text.split()] return Alignment(list_of_tuples) def", "\"\"\" # Converts integers to strings for a word alignment", "alignment: An Alignment object that contains a list of integer", "\"\"\" Converts an Alignment object (a list of tuples) into", "= ' '.join(str(i) + \"-\" + str(j) for i,j in", "# For license information, see LICENSE.TXT from nltk.align.api import Alignment", "integer tuples \"\"\" # Converts integers to strings for a", "object that contains a list of integer tuples :rtype: str", ">>> pharaoh_text = '0-0 2-1 9-2 21-3 10-4 7-5' >>>", "the pharaoh output format :rtype: Alignment :return: An Alignment object", "Toolkit: Aligner Utilities # # Copyright (C) 2001-2015 NLTK Project", "(2, 1), (9, 2), (21, 3), (10, 4), (7, 5)]", "word alignment point. list_of_tuples = [tuple(map(int,a.split('-'))) for a in pharaoh_text.split()]", "that contains a list of integer tuples \"\"\" # Converts", "3)]) :type pharaoh_text: str :param pharaoh_text: the word alignment outputs", "list of integer tuples \"\"\" # Converts integers to strings", "alignment outputs in the pharaoh output format \"\"\" pharaoh_text =", "NLTK Project # Author: <NAME> # URL: <http://www.nltk.org/> # For", "10-4 7-5' >>> pharaohtext2tuples(pharaoh_text) Alignment([(0, 0), (2, 1), (7, 5),", "return Alignment(list_of_tuples) def alignment2pharaohtext(alignment): \"\"\" Converts an Alignment object (a", "def pharaohtext2tuples(pharaoh_text): \"\"\" Converts pharaoh text format into an Alignment", "0), (2, 1), (9, 2), (21, 3), (10, 4), (7,", "tuples). 
>>> pharaoh_text = '0-0 2-1 9-2 21-3 10-4 7-5'", "object that contains a list of integer tuples \"\"\" #", "in pharaoh_text.split()] return Alignment(list_of_tuples) def alignment2pharaohtext(alignment): \"\"\" Converts an Alignment", "pharaohtext2tuples(pharaoh_text) Alignment([(0, 0), (2, 1), (7, 5), (9, 2), (10,", "word alignment outputs in the pharaoh output format :rtype: Alignment", "the word alignment outputs in the pharaoh output format \"\"\"", "str :return: the word alignment outputs in the pharaoh output", "\"\"\" pharaoh_text = ' '.join(str(i) + \"-\" + str(j) for", "(a list of tuples) into pharaoh text format. >>> alignment", "text format. >>> alignment = [(0, 0), (2, 1), (9,", "[(0, 0), (2, 1), (9, 2), (21, 3), (10, 4),", "An Alignment object that contains a list of integer tuples", "output format :rtype: Alignment :return: An Alignment object that contains", "(9, 2), (21, 3), (10, 4), (7, 5)] >>> alignment2pharaohtext(alignment)" ]
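A minimal round-trip sketch using the two helpers above (no assumptions beyond the functions themselves):

pairs = [(0, 0), (2, 1), (9, 2), (21, 3), (10, 4), (7, 5)]
text = alignment2pharaohtext(pairs)   # -> '0-0 2-1 9-2 21-3 10-4 7-5'
alignment = pharaohtext2tuples(text)  # -> Alignment over the same pairs
# Alignment is set-like, so compare contents rather than order.
assert set(alignment) == set(pairs)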
[ "with self.assertRaises(db.UnknownGRRUserError) as context: self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"quux\") self.assertEqual(context.exception.username, \"quux\") def testWriteYaraSignatureReferenceDuplicated(self):", "def testWriteYaraSignatureReferenceIncorrectUsername(self): blob_id = rdf_objects.BlobID(os.urandom(32)) with self.assertRaises(db.UnknownGRRUserError) as context: self.db.WriteYaraSignatureReference(blob_id=blob_id,", "= rdf_objects.BlobID(os.urandom(32)) self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.assertTrue(self.db.VerifyYaraSignatureReference(blob_id)) def testVerifyYaraSignatureReferenceIncorrect(self): blob_id = rdf_objects.BlobID(os.urandom(32))", "-*- encoding: utf-8 -*- \"\"\"A module with test cases for", "<gh_stars>1000+ #!/usr/bin/env python # -*- encoding: utf-8 -*- \"\"\"A module", "self.db.WriteGRRUser(\"foo\") blob_id = rdf_objects.BlobID(os.urandom(32)) # Writing duplicated signatures is possible,", "cases for the YARA database method.\"\"\" import os from grr_response_server.databases", "# -*- encoding: utf-8 -*- \"\"\"A module with test cases", "def testWriteYaraSignatureReferenceDuplicated(self): self.db.WriteGRRUser(\"foo\") blob_id = rdf_objects.BlobID(os.urandom(32)) # Writing duplicated signatures", "blob_id = rdf_objects.BlobID(os.urandom(32)) self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.assertTrue(self.db.VerifyYaraSignatureReference(blob_id)) def testVerifyYaraSignatureReferenceIncorrect(self): blob_id =", "from grr_response_server.databases import db from grr_response_server.rdfvalues import objects as rdf_objects", "as rdf_objects class DatabaseTestYaraMixin(object): \"\"\"A mixin class for testing YARA", "import os from grr_response_server.databases import db from grr_response_server.rdfvalues import objects", "class DatabaseTestYaraMixin(object): \"\"\"A mixin class for testing YARA methods of", "self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"quux\") self.assertEqual(context.exception.username, \"quux\") def testWriteYaraSignatureReferenceDuplicated(self): self.db.WriteGRRUser(\"foo\") blob_id = rdf_objects.BlobID(os.urandom(32))", "testWriteYaraSignatureReferenceDuplicated(self): self.db.WriteGRRUser(\"foo\") blob_id = rdf_objects.BlobID(os.urandom(32)) # Writing duplicated signatures is", "YARA database method.\"\"\" import os from grr_response_server.databases import db from", "from grr_response_server.rdfvalues import objects as rdf_objects class DatabaseTestYaraMixin(object): \"\"\"A mixin", "= rdf_objects.BlobID(os.urandom(32)) # Writing duplicated signatures is possible, it should", "= rdf_objects.BlobID(os.urandom(32)) with self.assertRaises(db.UnknownGRRUserError) as context: self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"quux\") self.assertEqual(context.exception.username, \"quux\")", "signatures is possible, it should not raise. self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.db.WriteYaraSignatureReference(blob_id=blob_id,", "grr_response_server.databases import db from grr_response_server.rdfvalues import objects as rdf_objects class", "Writing duplicated signatures is possible, it should not raise. 
self.db.WriteYaraSignatureReference(blob_id=blob_id,", "blob_id = rdf_objects.BlobID(os.urandom(32)) # Writing duplicated signatures is possible, it", "testing YARA methods of database implementations.\"\"\" def testWriteYaraSignatureReferenceIncorrectUsername(self): blob_id =", "context: self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"quux\") self.assertEqual(context.exception.username, \"quux\") def testWriteYaraSignatureReferenceDuplicated(self): self.db.WriteGRRUser(\"foo\") blob_id =", "\"\"\"A module with test cases for the YARA database method.\"\"\"", "self.assertEqual(context.exception.username, \"quux\") def testWriteYaraSignatureReferenceDuplicated(self): self.db.WriteGRRUser(\"foo\") blob_id = rdf_objects.BlobID(os.urandom(32)) # Writing", "database implementations.\"\"\" def testWriteYaraSignatureReferenceIncorrectUsername(self): blob_id = rdf_objects.BlobID(os.urandom(32)) with self.assertRaises(db.UnknownGRRUserError) as", "self.db.WriteGRRUser(\"foo\") blob_id = rdf_objects.BlobID(os.urandom(32)) self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.assertTrue(self.db.VerifyYaraSignatureReference(blob_id)) def testVerifyYaraSignatureReferenceIncorrect(self): blob_id", "rdf_objects.BlobID(os.urandom(32)) with self.assertRaises(db.UnknownGRRUserError) as context: self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"quux\") self.assertEqual(context.exception.username, \"quux\") def", "self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") def testVerifyYaraSignatureReferenceSimple(self): self.db.WriteGRRUser(\"foo\") blob_id = rdf_objects.BlobID(os.urandom(32)) self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\")", "YARA methods of database implementations.\"\"\" def testWriteYaraSignatureReferenceIncorrectUsername(self): blob_id = rdf_objects.BlobID(os.urandom(32))", "objects as rdf_objects class DatabaseTestYaraMixin(object): \"\"\"A mixin class for testing", "possible, it should not raise. 
self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") def", "module with test cases for the YARA database method.\"\"\" import", "def testVerifyYaraSignatureReferenceSimple(self): self.db.WriteGRRUser(\"foo\") blob_id = rdf_objects.BlobID(os.urandom(32)) self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.assertTrue(self.db.VerifyYaraSignatureReference(blob_id)) def", "username=\"quux\") self.assertEqual(context.exception.username, \"quux\") def testWriteYaraSignatureReferenceDuplicated(self): self.db.WriteGRRUser(\"foo\") blob_id = rdf_objects.BlobID(os.urandom(32)) #", "rdf_objects.BlobID(os.urandom(32)) self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.assertTrue(self.db.VerifyYaraSignatureReference(blob_id)) def testVerifyYaraSignatureReferenceIncorrect(self): blob_id = rdf_objects.BlobID(os.urandom(32)) self.assertFalse(self.db.VerifyYaraSignatureReference(blob_id))", "username=\"foo\") def testVerifyYaraSignatureReferenceSimple(self): self.db.WriteGRRUser(\"foo\") blob_id = rdf_objects.BlobID(os.urandom(32)) self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.assertTrue(self.db.VerifyYaraSignatureReference(blob_id))", "of database implementations.\"\"\" def testWriteYaraSignatureReferenceIncorrectUsername(self): blob_id = rdf_objects.BlobID(os.urandom(32)) with self.assertRaises(db.UnknownGRRUserError)", "methods of database implementations.\"\"\" def testWriteYaraSignatureReferenceIncorrectUsername(self): blob_id = rdf_objects.BlobID(os.urandom(32)) with", "DatabaseTestYaraMixin(object): \"\"\"A mixin class for testing YARA methods of database", "import objects as rdf_objects class DatabaseTestYaraMixin(object): \"\"\"A mixin class for", "-*- \"\"\"A module with test cases for the YARA database", "mixin class for testing YARA methods of database implementations.\"\"\" def", "raise. self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") def testVerifyYaraSignatureReferenceSimple(self): self.db.WriteGRRUser(\"foo\") blob_id =", "utf-8 -*- \"\"\"A module with test cases for the YARA", "\"quux\") def testWriteYaraSignatureReferenceDuplicated(self): self.db.WriteGRRUser(\"foo\") blob_id = rdf_objects.BlobID(os.urandom(32)) # Writing duplicated", "as context: self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"quux\") self.assertEqual(context.exception.username, \"quux\") def testWriteYaraSignatureReferenceDuplicated(self): self.db.WriteGRRUser(\"foo\") blob_id", "for testing YARA methods of database implementations.\"\"\" def testWriteYaraSignatureReferenceIncorrectUsername(self): blob_id", "for the YARA database method.\"\"\" import os from grr_response_server.databases import", "grr_response_server.rdfvalues import objects as rdf_objects class DatabaseTestYaraMixin(object): \"\"\"A mixin class", "import db from grr_response_server.rdfvalues import objects as rdf_objects class DatabaseTestYaraMixin(object):", "it should not raise. self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") def testVerifyYaraSignatureReferenceSimple(self):", "class for testing YARA methods of database implementations.\"\"\" def testWriteYaraSignatureReferenceIncorrectUsername(self):", "should not raise. 
self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") def testVerifyYaraSignatureReferenceSimple(self): self.db.WriteGRRUser(\"foo\")", "rdf_objects class DatabaseTestYaraMixin(object): \"\"\"A mixin class for testing YARA methods", "self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") def testVerifyYaraSignatureReferenceSimple(self): self.db.WriteGRRUser(\"foo\") blob_id = rdf_objects.BlobID(os.urandom(32))", "python # -*- encoding: utf-8 -*- \"\"\"A module with test", "\"\"\"A mixin class for testing YARA methods of database implementations.\"\"\"", "# Writing duplicated signatures is possible, it should not raise.", "database method.\"\"\" import os from grr_response_server.databases import db from grr_response_server.rdfvalues", "username=\"foo\") self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") def testVerifyYaraSignatureReferenceSimple(self): self.db.WriteGRRUser(\"foo\") blob_id = rdf_objects.BlobID(os.urandom(32)) self.db.WriteYaraSignatureReference(blob_id=blob_id,", "db from grr_response_server.rdfvalues import objects as rdf_objects class DatabaseTestYaraMixin(object): \"\"\"A", "testVerifyYaraSignatureReferenceSimple(self): self.db.WriteGRRUser(\"foo\") blob_id = rdf_objects.BlobID(os.urandom(32)) self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.assertTrue(self.db.VerifyYaraSignatureReference(blob_id)) def testVerifyYaraSignatureReferenceIncorrect(self):", "implementations.\"\"\" def testWriteYaraSignatureReferenceIncorrectUsername(self): blob_id = rdf_objects.BlobID(os.urandom(32)) with self.assertRaises(db.UnknownGRRUserError) as context:", "encoding: utf-8 -*- \"\"\"A module with test cases for the", "the YARA database method.\"\"\" import os from grr_response_server.databases import db", "blob_id = rdf_objects.BlobID(os.urandom(32)) with self.assertRaises(db.UnknownGRRUserError) as context: self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"quux\") self.assertEqual(context.exception.username,", "is possible, it should not raise. self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\")", "with test cases for the YARA database method.\"\"\" import os", "self.assertRaises(db.UnknownGRRUserError) as context: self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"quux\") self.assertEqual(context.exception.username, \"quux\") def testWriteYaraSignatureReferenceDuplicated(self): self.db.WriteGRRUser(\"foo\")", "test cases for the YARA database method.\"\"\" import os from", "os from grr_response_server.databases import db from grr_response_server.rdfvalues import objects as", "rdf_objects.BlobID(os.urandom(32)) # Writing duplicated signatures is possible, it should not", "testWriteYaraSignatureReferenceIncorrectUsername(self): blob_id = rdf_objects.BlobID(os.urandom(32)) with self.assertRaises(db.UnknownGRRUserError) as context: self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"quux\")", "duplicated signatures is possible, it should not raise. self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\")", "method.\"\"\" import os from grr_response_server.databases import db from grr_response_server.rdfvalues import", "not raise. 
self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") self.db.WriteYaraSignatureReference(blob_id=blob_id, username=\"foo\") def testVerifyYaraSignatureReferenceSimple(self): self.db.WriteGRRUser(\"foo\") blob_id", "#!/usr/bin/env python # -*- encoding: utf-8 -*- \"\"\"A module with" ]
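A sketch of how a mixin like this is typically consumed: each concrete database backend supplies a test base that creates `self.db`, and a combining class inherits both. The base class and factory below are hypothetical stand-ins, not GRR's actual test harness:

import unittest


class FakeDatabaseTestBase(unittest.TestCase):  # hypothetical stand-in
  """Hypothetical base that provides `self.db` for the mixin's tests."""

  def setUp(self):
    super().setUp()
    self.db = CreateTestDatabase()  # hypothetical factory for some backend


class YaraTest(DatabaseTestYaraMixin, FakeDatabaseTestBase):
  """Inherits every test* method from the mixin and runs it on `self.db`."""


if __name__ == "__main__":
  unittest.main()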
[ "likelihood if inducing_points.ndimension() == 1: inducing_points = inducing_points.unsqueeze(-1) self.register_parameter(name=\"inducing_points\", parameter=torch.nn.Parameter(inducing_points))", "for fast variances return exact_prediction_strategies.SGPRPredictionStrategy( train_inputs, train_prior_dist, train_labels, likelihood )", "equal x2 in training mode\") zero_mean = torch.zeros_like(x1.select(-1, 0)) new_added_loss_term", "train_inputs, train_prior_dist, train_labels, likelihood): # Allow for fast variances return", "kernel_mat = self._cached_kernel_mat cp = self.__class__( base_kernel=copy.deepcopy(self.base_kernel), inducing_points=copy.deepcopy(self.inducing_points), likelihood=self.likelihood, active_dims=self.active_dims,", "def __deepcopy__(self, memo): replace_inv_root = False replace_kernel_mat = False if", "if inducing_points.ndimension() == 1: inducing_points = inducing_points.unsqueeze(-1) self.register_parameter(name=\"inducing_points\", parameter=torch.nn.Parameter(inducing_points)) self.register_added_loss_term(\"inducing_point_loss_term\")", "= False replace_kernel_mat = False if hasattr(self, \"_cached_kernel_inv_root\"): replace_inv_root =", "inducing_points = inducing_points.unsqueeze(-1) self.register_parameter(name=\"inducing_points\", parameter=torch.nn.Parameter(inducing_points)) self.register_added_loss_term(\"inducing_point_loss_term\") def _clear_cache(self): if hasattr(self,", "= base_kernel self.likelihood = likelihood if inducing_points.ndimension() == 1: inducing_points", "def _get_covariance(self, x1, x2): k_ux1 = delazify(self.base_kernel(x1, self.inducing_points)) if torch.equal(x1,", "return covar def num_outputs_per_input(self, x1, x2): return self.base_kernel.num_outputs_per_input(x1, x2) def", "= res return res def _get_covariance(self, x1, x2): k_ux1 =", "Get diagonal of covar covar_diag = delazify(self.base_kernel(inputs, diag=True)) return DiagLazyTensor(covar_diag)", "of covar covar_diag = delazify(self.base_kernel(inputs, diag=True)) return DiagLazyTensor(covar_diag) def forward(self,", "covar), self.likelihood, ) self.update_added_loss_term(\"inducing_point_loss_term\", new_added_loss_term) if diag: return covar.diag() else:", "MultivariateNormal(zero_mean, covar), self.likelihood, ) self.update_added_loss_term(\"inducing_point_loss_term\", new_added_loss_term) if diag: return covar.diag()", "= delazify(self.base_kernel(x1, self.inducing_points)) if torch.equal(x1, x2): covar = LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root)) #", "for predictive posterior if not self.training: correction = (self.base_kernel(x1, x2,", "covar covar_diag = delazify(self.base_kernel(inputs, diag=True)) return DiagLazyTensor(covar_diag) def forward(self, x1,", "# Diagonal correction for predictive posterior if not self.training: correction", "memo): replace_inv_root = False replace_kernel_mat = False if hasattr(self, \"_cached_kernel_inv_root\"):", "= False if hasattr(self, \"_cached_kernel_inv_root\"): replace_inv_root = True kernel_inv_root =", "__init__(self, base_kernel, inducing_points, likelihood, active_dims=None): super(InducingPointKernel, self).__init__(active_dims=active_dims) self.base_kernel = base_kernel", "not self.training: correction = (self.base_kernel(x1, x2, diag=True) - covar.diag()).clamp(0, math.inf)", "self._cached_kernel_inv_root if hasattr(self, \"_cached_kernel_mat\"): replace_kernel_mat = True kernel_mat = self._cached_kernel_mat", "chol = psd_safe_cholesky(self._inducing_mat, upper=True) eye = torch.eye(chol.size(-1), 
device=chol.device, dtype=chol.dtype) inv_root", "#!/usr/bin/env python3 import copy import math import torch from ..distributions", "MultivariateNormal from ..lazy import DiagLazyTensor, LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor, MatmulLazyTensor, delazify from", "import exact_prediction_strategies from ..utils.cholesky import psd_safe_cholesky from .kernel import Kernel", "return DiagLazyTensor(covar_diag) def forward(self, x1, x2, diag=False, **kwargs): covar =", "return covar.diag() else: return covar def num_outputs_per_input(self, x1, x2): return", "self._cached_kernel_mat else: res = delazify(self.base_kernel(self.inducing_points, self.inducing_points)) if not self.training: self._cached_kernel_mat", "covar_diag = delazify(self.base_kernel(inputs, diag=True)) return DiagLazyTensor(covar_diag) def forward(self, x1, x2,", "parameter=torch.nn.Parameter(inducing_points)) self.register_added_loss_term(\"inducing_point_loss_term\") def _clear_cache(self): if hasattr(self, \"_cached_kernel_mat\"): del self._cached_kernel_mat @property", "Kernel class InducingPointKernel(Kernel): def __init__(self, base_kernel, inducing_points, likelihood, active_dims=None): super(InducingPointKernel,", "_inducing_inv_root(self): if not self.training and hasattr(self, \"_cached_kernel_inv_root\"): return self._cached_kernel_inv_root else:", "False if hasattr(self, \"_cached_kernel_inv_root\"): replace_inv_root = True kernel_inv_root = self._cached_kernel_inv_root", "self._cached_kernel_inv_root = res return res def _get_covariance(self, x1, x2): k_ux1", "MultivariateNormal(zero_mean, self._covar_diag(x1)), MultivariateNormal(zero_mean, covar), self.likelihood, ) self.update_added_loss_term(\"inducing_point_loss_term\", new_added_loss_term) if diag:", "correction = (self.base_kernel(x1, x2, diag=True) - covar.diag()).clamp(0, math.inf) covar =", "res return res @property def _inducing_inv_root(self): if not self.training and", "_covar_diag(self, inputs): if inputs.ndimension() == 1: inputs = inputs.unsqueeze(1) #", "__deepcopy__(self, memo): replace_inv_root = False replace_kernel_mat = False if hasattr(self,", "prediction_strategy(self, train_inputs, train_prior_dist, train_labels, likelihood): # Allow for fast variances", "diagonal of covar covar_diag = delazify(self.base_kernel(inputs, diag=True)) return DiagLazyTensor(covar_diag) def", "upper=True) eye = torch.eye(chol.size(-1), device=chol.device, dtype=chol.dtype) inv_root = torch.triangular_solve(eye, chol)[0]", "hasattr(self, \"_cached_kernel_mat\"): return self._cached_kernel_mat else: res = delazify(self.base_kernel(self.inducing_points, self.inducing_points)) if", "else: k_ux2 = delazify(self.base_kernel(x2, self.inducing_points)) covar = MatmulLazyTensor( k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1,", "replace_inv_root = True kernel_inv_root = self._cached_kernel_inv_root if hasattr(self, \"_cached_kernel_mat\"): replace_kernel_mat", "= res return res @property def _inducing_inv_root(self): if not self.training", "import copy import math import torch from ..distributions import MultivariateNormal", "self.base_kernel = base_kernel self.likelihood = likelihood if inducing_points.ndimension() == 1:", "x2) if self.training: if not torch.equal(x1, x2): raise RuntimeError(\"x1 should", "InducingPointKernelAddedLossTerm( MultivariateNormal(zero_mean, self._covar_diag(x1)), MultivariateNormal(zero_mean, covar), self.likelihood, ) self.update_added_loss_term(\"inducing_point_loss_term\", 
new_added_loss_term) if", "x2, diag=True) - covar.diag()).clamp(0, math.inf) covar = LowRankRootAddedDiagLazyTensor(covar, DiagLazyTensor(correction)) else:", "False replace_kernel_mat = False if hasattr(self, \"_cached_kernel_inv_root\"): replace_inv_root = True", "else: chol = psd_safe_cholesky(self._inducing_mat, upper=True) eye = torch.eye(chol.size(-1), device=chol.device, dtype=chol.dtype)", "return self._cached_kernel_inv_root else: chol = psd_safe_cholesky(self._inducing_mat, upper=True) eye = torch.eye(chol.size(-1),", "self._get_covariance(x1, x2) if self.training: if not torch.equal(x1, x2): raise RuntimeError(\"x1", "InducingPointKernel(Kernel): def __init__(self, base_kernel, inducing_points, likelihood, active_dims=None): super(InducingPointKernel, self).__init__(active_dims=active_dims) self.base_kernel", "= delazify(self.base_kernel(inputs, diag=True)) return DiagLazyTensor(covar_diag) def forward(self, x1, x2, diag=False,", "from ..models import exact_prediction_strategies from ..utils.cholesky import psd_safe_cholesky from .kernel", "LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor, MatmulLazyTensor, delazify from ..mlls import InducingPointKernelAddedLossTerm from ..models", "not self.training: self._cached_kernel_inv_root = res return res def _get_covariance(self, x1,", "= True kernel_inv_root = self._cached_kernel_inv_root if hasattr(self, \"_cached_kernel_mat\"): replace_kernel_mat =", "posterior if not self.training: correction = (self.base_kernel(x1, x2, diag=True) -", "Allow for fast variances return exact_prediction_strategies.SGPRPredictionStrategy( train_inputs, train_prior_dist, train_labels, likelihood", "= psd_safe_cholesky(self._inducing_mat, upper=True) eye = torch.eye(chol.size(-1), device=chol.device, dtype=chol.dtype) inv_root =", "torch.equal(x1, x2): covar = LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root)) # Diagonal correction for predictive", "self._cached_kernel_mat @property def _inducing_mat(self): if not self.training and hasattr(self, \"_cached_kernel_mat\"):", "x2) def __deepcopy__(self, memo): replace_inv_root = False replace_kernel_mat = False", "hasattr(self, \"_cached_kernel_inv_root\"): return self._cached_kernel_inv_root else: chol = psd_safe_cholesky(self._inducing_mat, upper=True) eye", "def _inducing_inv_root(self): if not self.training and hasattr(self, \"_cached_kernel_inv_root\"): return self._cached_kernel_inv_root", "def _inducing_mat(self): if not self.training and hasattr(self, \"_cached_kernel_mat\"): return self._cached_kernel_mat", "self.inducing_points)) if not self.training: self._cached_kernel_mat = res return res @property", "self.__class__( base_kernel=copy.deepcopy(self.base_kernel), inducing_points=copy.deepcopy(self.inducing_points), likelihood=self.likelihood, active_dims=self.active_dims, ) if replace_inv_root: cp._cached_kernel_inv_root =", "import DiagLazyTensor, LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor, MatmulLazyTensor, delazify from ..mlls import InducingPointKernelAddedLossTerm", "self.update_added_loss_term(\"inducing_point_loss_term\", new_added_loss_term) if diag: return covar.diag() else: return covar def", "k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2) ) return covar def _covar_diag(self, inputs): if", "@property def _inducing_inv_root(self): if not self.training and hasattr(self, \"_cached_kernel_inv_root\"): return", "from ..distributions import MultivariateNormal from ..lazy import DiagLazyTensor, 
LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor,", "exact_prediction_strategies from ..utils.cholesky import psd_safe_cholesky from .kernel import Kernel class", "x2 in training mode\") zero_mean = torch.zeros_like(x1.select(-1, 0)) new_added_loss_term =", "delazify(self.base_kernel(self.inducing_points, self.inducing_points)) if not self.training: self._cached_kernel_mat = res return res", "_get_covariance(self, x1, x2): k_ux1 = delazify(self.base_kernel(x1, self.inducing_points)) if torch.equal(x1, x2):", "new_added_loss_term = InducingPointKernelAddedLossTerm( MultivariateNormal(zero_mean, self._covar_diag(x1)), MultivariateNormal(zero_mean, covar), self.likelihood, ) self.update_added_loss_term(\"inducing_point_loss_term\",", "inv_root if not self.training: self._cached_kernel_inv_root = res return res def", "torch.equal(x1, x2): raise RuntimeError(\"x1 should equal x2 in training mode\")", "cp._cached_kernel_inv_root = kernel_inv_root if replace_kernel_mat: cp._cached_kernel_mat = kernel_mat return cp", "kernel_inv_root if replace_kernel_mat: cp._cached_kernel_mat = kernel_mat return cp def prediction_strategy(self,", "base_kernel=copy.deepcopy(self.base_kernel), inducing_points=copy.deepcopy(self.inducing_points), likelihood=self.likelihood, active_dims=self.active_dims, ) if replace_inv_root: cp._cached_kernel_inv_root = kernel_inv_root", "cp._cached_kernel_mat = kernel_mat return cp def prediction_strategy(self, train_inputs, train_prior_dist, train_labels,", "delazify(self.base_kernel(inputs, diag=True)) return DiagLazyTensor(covar_diag) def forward(self, x1, x2, diag=False, **kwargs):", "inducing_points, likelihood, active_dims=None): super(InducingPointKernel, self).__init__(active_dims=active_dims) self.base_kernel = base_kernel self.likelihood =", "= kernel_mat return cp def prediction_strategy(self, train_inputs, train_prior_dist, train_labels, likelihood):", "**kwargs): covar = self._get_covariance(x1, x2) if self.training: if not torch.equal(x1,", "likelihood): # Allow for fast variances return exact_prediction_strategies.SGPRPredictionStrategy( train_inputs, train_prior_dist,", "def prediction_strategy(self, train_inputs, train_prior_dist, train_labels, likelihood): # Allow for fast", "self.likelihood, ) self.update_added_loss_term(\"inducing_point_loss_term\", new_added_loss_term) if diag: return covar.diag() else: return", "hasattr(self, \"_cached_kernel_mat\"): replace_kernel_mat = True kernel_mat = self._cached_kernel_mat cp =", "MatmulLazyTensor, delazify from ..mlls import InducingPointKernelAddedLossTerm from ..models import exact_prediction_strategies", "k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2) ) return covar def _covar_diag(self, inputs): if inputs.ndimension()", "train_labels, likelihood): # Allow for fast variances return exact_prediction_strategies.SGPRPredictionStrategy( train_inputs,", "== 1: inducing_points = inducing_points.unsqueeze(-1) self.register_parameter(name=\"inducing_points\", parameter=torch.nn.Parameter(inducing_points)) self.register_added_loss_term(\"inducing_point_loss_term\") def _clear_cache(self):", "res = inv_root if not self.training: self._cached_kernel_inv_root = res return", "if self.training: if not torch.equal(x1, x2): raise RuntimeError(\"x1 should equal", "torch.zeros_like(x1.select(-1, 0)) new_added_loss_term = InducingPointKernelAddedLossTerm( MultivariateNormal(zero_mean, self._covar_diag(x1)), MultivariateNormal(zero_mean, covar), self.likelihood,", "1: inputs = inputs.unsqueeze(1) # Get diagonal of covar 
covar_diag", "raise RuntimeError(\"x1 should equal x2 in training mode\") zero_mean =", "not self.training: self._cached_kernel_mat = res return res @property def _inducing_inv_root(self):", "if diag: return covar.diag() else: return covar def num_outputs_per_input(self, x1,", "= delazify(self.base_kernel(x2, self.inducing_points)) covar = MatmulLazyTensor( k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2) )", "x1, x2): return self.base_kernel.num_outputs_per_input(x1, x2) def __deepcopy__(self, memo): replace_inv_root =", "== 1: inputs = inputs.unsqueeze(1) # Get diagonal of covar", "inducing_points=copy.deepcopy(self.inducing_points), likelihood=self.likelihood, active_dims=self.active_dims, ) if replace_inv_root: cp._cached_kernel_inv_root = kernel_inv_root if", "_inducing_mat(self): if not self.training and hasattr(self, \"_cached_kernel_mat\"): return self._cached_kernel_mat else:", "DiagLazyTensor(correction)) else: k_ux2 = delazify(self.base_kernel(x2, self.inducing_points)) covar = MatmulLazyTensor( k_ux1.matmul(self._inducing_inv_root),", "\"_cached_kernel_inv_root\"): return self._cached_kernel_inv_root else: chol = psd_safe_cholesky(self._inducing_mat, upper=True) eye =", "base_kernel, inducing_points, likelihood, active_dims=None): super(InducingPointKernel, self).__init__(active_dims=active_dims) self.base_kernel = base_kernel self.likelihood", "math.inf) covar = LowRankRootAddedDiagLazyTensor(covar, DiagLazyTensor(correction)) else: k_ux2 = delazify(self.base_kernel(x2, self.inducing_points))", "psd_safe_cholesky from .kernel import Kernel class InducingPointKernel(Kernel): def __init__(self, base_kernel,", "inducing_points.unsqueeze(-1) self.register_parameter(name=\"inducing_points\", parameter=torch.nn.Parameter(inducing_points)) self.register_added_loss_term(\"inducing_point_loss_term\") def _clear_cache(self): if hasattr(self, \"_cached_kernel_mat\"): del", "torch.eye(chol.size(-1), device=chol.device, dtype=chol.dtype) inv_root = torch.triangular_solve(eye, chol)[0] res = inv_root", "and hasattr(self, \"_cached_kernel_mat\"): return self._cached_kernel_mat else: res = delazify(self.base_kernel(self.inducing_points, self.inducing_points))", "self.register_parameter(name=\"inducing_points\", parameter=torch.nn.Parameter(inducing_points)) self.register_added_loss_term(\"inducing_point_loss_term\") def _clear_cache(self): if hasattr(self, \"_cached_kernel_mat\"): del self._cached_kernel_mat", "res return res def _get_covariance(self, x1, x2): k_ux1 = delazify(self.base_kernel(x1,", "self._cached_kernel_mat cp = self.__class__( base_kernel=copy.deepcopy(self.base_kernel), inducing_points=copy.deepcopy(self.inducing_points), likelihood=self.likelihood, active_dims=self.active_dims, ) if", "= self._cached_kernel_inv_root if hasattr(self, \"_cached_kernel_mat\"): replace_kernel_mat = True kernel_mat =", "self._cached_kernel_inv_root else: chol = psd_safe_cholesky(self._inducing_mat, upper=True) eye = torch.eye(chol.size(-1), device=chol.device,", "hasattr(self, \"_cached_kernel_mat\"): del self._cached_kernel_mat @property def _inducing_mat(self): if not self.training", "self._cached_kernel_mat = res return res @property def _inducing_inv_root(self): if not", "cp = self.__class__( base_kernel=copy.deepcopy(self.base_kernel), inducing_points=copy.deepcopy(self.inducing_points), likelihood=self.likelihood, active_dims=self.active_dims, ) if replace_inv_root:", "k_ux1 = delazify(self.base_kernel(x1, self.inducing_points)) if 
torch.equal(x1, x2): covar = LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root))", "in training mode\") zero_mean = torch.zeros_like(x1.select(-1, 0)) new_added_loss_term = InducingPointKernelAddedLossTerm(", "self.likelihood = likelihood if inducing_points.ndimension() == 1: inducing_points = inducing_points.unsqueeze(-1)", "torch from ..distributions import MultivariateNormal from ..lazy import DiagLazyTensor, LowRankRootAddedDiagLazyTensor,", "True kernel_mat = self._cached_kernel_mat cp = self.__class__( base_kernel=copy.deepcopy(self.base_kernel), inducing_points=copy.deepcopy(self.inducing_points), likelihood=self.likelihood,", "likelihood=self.likelihood, active_dims=self.active_dims, ) if replace_inv_root: cp._cached_kernel_inv_root = kernel_inv_root if replace_kernel_mat:", "dtype=chol.dtype) inv_root = torch.triangular_solve(eye, chol)[0] res = inv_root if not", "from .kernel import Kernel class InducingPointKernel(Kernel): def __init__(self, base_kernel, inducing_points,", "= inputs.unsqueeze(1) # Get diagonal of covar covar_diag = delazify(self.base_kernel(inputs,", "base_kernel self.likelihood = likelihood if inducing_points.ndimension() == 1: inducing_points =", "import Kernel class InducingPointKernel(Kernel): def __init__(self, base_kernel, inducing_points, likelihood, active_dims=None):", "forward(self, x1, x2, diag=False, **kwargs): covar = self._get_covariance(x1, x2) if", "= delazify(self.base_kernel(self.inducing_points, self.inducing_points)) if not self.training: self._cached_kernel_mat = res return", "not torch.equal(x1, x2): raise RuntimeError(\"x1 should equal x2 in training", "if hasattr(self, \"_cached_kernel_mat\"): del self._cached_kernel_mat @property def _inducing_mat(self): if not", "eye = torch.eye(chol.size(-1), device=chol.device, dtype=chol.dtype) inv_root = torch.triangular_solve(eye, chol)[0] res", "RuntimeError(\"x1 should equal x2 in training mode\") zero_mean = torch.zeros_like(x1.select(-1,", "covar.diag()).clamp(0, math.inf) covar = LowRankRootAddedDiagLazyTensor(covar, DiagLazyTensor(correction)) else: k_ux2 = delazify(self.base_kernel(x2,", "self.training and hasattr(self, \"_cached_kernel_mat\"): return self._cached_kernel_mat else: res = delazify(self.base_kernel(self.inducing_points,", "from ..mlls import InducingPointKernelAddedLossTerm from ..models import exact_prediction_strategies from ..utils.cholesky", "0)) new_added_loss_term = InducingPointKernelAddedLossTerm( MultivariateNormal(zero_mean, self._covar_diag(x1)), MultivariateNormal(zero_mean, covar), self.likelihood, )", "diag: return covar.diag() else: return covar def num_outputs_per_input(self, x1, x2):", ") return covar def _covar_diag(self, inputs): if inputs.ndimension() == 1:", "DiagLazyTensor, LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor, MatmulLazyTensor, delazify from ..mlls import InducingPointKernelAddedLossTerm from", "new_added_loss_term) if diag: return covar.diag() else: return covar def num_outputs_per_input(self,", "res def _get_covariance(self, x1, x2): k_ux1 = delazify(self.base_kernel(x1, self.inducing_points)) if", "import MultivariateNormal from ..lazy import DiagLazyTensor, LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor, MatmulLazyTensor, delazify", "= torch.eye(chol.size(-1), device=chol.device, dtype=chol.dtype) inv_root = torch.triangular_solve(eye, chol)[0] res =", "import math import torch from ..distributions import MultivariateNormal from ..lazy", "python3 import copy import math import torch from ..distributions import", 
"inputs): if inputs.ndimension() == 1: inputs = inputs.unsqueeze(1) # Get", "if not self.training: self._cached_kernel_mat = res return res @property def", "# Get diagonal of covar covar_diag = delazify(self.base_kernel(inputs, diag=True)) return", "inv_root = torch.triangular_solve(eye, chol)[0] res = inv_root if not self.training:", "= self._cached_kernel_mat cp = self.__class__( base_kernel=copy.deepcopy(self.base_kernel), inducing_points=copy.deepcopy(self.inducing_points), likelihood=self.likelihood, active_dims=self.active_dims, )", "x2): k_ux1 = delazify(self.base_kernel(x1, self.inducing_points)) if torch.equal(x1, x2): covar =", "likelihood, active_dims=None): super(InducingPointKernel, self).__init__(active_dims=active_dims) self.base_kernel = base_kernel self.likelihood = likelihood", "if inputs.ndimension() == 1: inputs = inputs.unsqueeze(1) # Get diagonal", "(self.base_kernel(x1, x2, diag=True) - covar.diag()).clamp(0, math.inf) covar = LowRankRootAddedDiagLazyTensor(covar, DiagLazyTensor(correction))", "delazify(self.base_kernel(x1, self.inducing_points)) if torch.equal(x1, x2): covar = LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root)) # Diagonal", "@property def _inducing_mat(self): if not self.training and hasattr(self, \"_cached_kernel_mat\"): return", "chol)[0] res = inv_root if not self.training: self._cached_kernel_inv_root = res", ".kernel import Kernel class InducingPointKernel(Kernel): def __init__(self, base_kernel, inducing_points, likelihood,", "if not self.training and hasattr(self, \"_cached_kernel_inv_root\"): return self._cached_kernel_inv_root else: chol", "k_ux2 = delazify(self.base_kernel(x2, self.inducing_points)) covar = MatmulLazyTensor( k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2)", "if replace_kernel_mat: cp._cached_kernel_mat = kernel_mat return cp def prediction_strategy(self, train_inputs,", "math import torch from ..distributions import MultivariateNormal from ..lazy import", "import InducingPointKernelAddedLossTerm from ..models import exact_prediction_strategies from ..utils.cholesky import psd_safe_cholesky", "train_prior_dist, train_labels, likelihood): # Allow for fast variances return exact_prediction_strategies.SGPRPredictionStrategy(", "def _covar_diag(self, inputs): if inputs.ndimension() == 1: inputs = inputs.unsqueeze(1)", "inputs.unsqueeze(1) # Get diagonal of covar covar_diag = delazify(self.base_kernel(inputs, diag=True))", "mode\") zero_mean = torch.zeros_like(x1.select(-1, 0)) new_added_loss_term = InducingPointKernelAddedLossTerm( MultivariateNormal(zero_mean, self._covar_diag(x1)),", "from ..utils.cholesky import psd_safe_cholesky from .kernel import Kernel class InducingPointKernel(Kernel):", "_clear_cache(self): if hasattr(self, \"_cached_kernel_mat\"): del self._cached_kernel_mat @property def _inducing_mat(self): if", "- covar.diag()).clamp(0, math.inf) covar = LowRankRootAddedDiagLazyTensor(covar, DiagLazyTensor(correction)) else: k_ux2 =", "self._covar_diag(x1)), MultivariateNormal(zero_mean, covar), self.likelihood, ) self.update_added_loss_term(\"inducing_point_loss_term\", new_added_loss_term) if diag: return", "return res def _get_covariance(self, x1, x2): k_ux1 = delazify(self.base_kernel(x1, self.inducing_points))", "= (self.base_kernel(x1, x2, diag=True) - covar.diag()).clamp(0, math.inf) covar = LowRankRootAddedDiagLazyTensor(covar,", "def __init__(self, base_kernel, inducing_points, likelihood, active_dims=None): super(InducingPointKernel, 
self).__init__(active_dims=active_dims) self.base_kernel =", "= LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root)) # Diagonal correction for predictive posterior if not", "covar = LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root)) # Diagonal correction for predictive posterior if", "if replace_inv_root: cp._cached_kernel_inv_root = kernel_inv_root if replace_kernel_mat: cp._cached_kernel_mat = kernel_mat", "inputs = inputs.unsqueeze(1) # Get diagonal of covar covar_diag =", "return self.base_kernel.num_outputs_per_input(x1, x2) def __deepcopy__(self, memo): replace_inv_root = False replace_kernel_mat", "self.training: self._cached_kernel_mat = res return res @property def _inducing_inv_root(self): if", "replace_kernel_mat: cp._cached_kernel_mat = kernel_mat return cp def prediction_strategy(self, train_inputs, train_prior_dist,", "and hasattr(self, \"_cached_kernel_inv_root\"): return self._cached_kernel_inv_root else: chol = psd_safe_cholesky(self._inducing_mat, upper=True)", "..lazy import DiagLazyTensor, LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor, MatmulLazyTensor, delazify from ..mlls import", "covar = MatmulLazyTensor( k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2) ) return covar def", "\"_cached_kernel_mat\"): replace_kernel_mat = True kernel_mat = self._cached_kernel_mat cp = self.__class__(", "replace_kernel_mat = True kernel_mat = self._cached_kernel_mat cp = self.__class__( base_kernel=copy.deepcopy(self.base_kernel),", "zero_mean = torch.zeros_like(x1.select(-1, 0)) new_added_loss_term = InducingPointKernelAddedLossTerm( MultivariateNormal(zero_mean, self._covar_diag(x1)), MultivariateNormal(zero_mean,", "-2) ) return covar def _covar_diag(self, inputs): if inputs.ndimension() ==", "covar def num_outputs_per_input(self, x1, x2): return self.base_kernel.num_outputs_per_input(x1, x2) def __deepcopy__(self,", "return res @property def _inducing_inv_root(self): if not self.training and hasattr(self,", "correction for predictive posterior if not self.training: correction = (self.base_kernel(x1,", "else: return covar def num_outputs_per_input(self, x1, x2): return self.base_kernel.num_outputs_per_input(x1, x2)", "delazify from ..mlls import InducingPointKernelAddedLossTerm from ..models import exact_prediction_strategies from", "return self._cached_kernel_mat else: res = delazify(self.base_kernel(self.inducing_points, self.inducing_points)) if not self.training:", "..models import exact_prediction_strategies from ..utils.cholesky import psd_safe_cholesky from .kernel import", "if not self.training: correction = (self.base_kernel(x1, x2, diag=True) - covar.diag()).clamp(0,", "inducing_points.ndimension() == 1: inducing_points = inducing_points.unsqueeze(-1) self.register_parameter(name=\"inducing_points\", parameter=torch.nn.Parameter(inducing_points)) self.register_added_loss_term(\"inducing_point_loss_term\") def", "kernel_mat return cp def prediction_strategy(self, train_inputs, train_prior_dist, train_labels, likelihood): #", "Diagonal correction for predictive posterior if not self.training: correction =", ") self.update_added_loss_term(\"inducing_point_loss_term\", new_added_loss_term) if diag: return covar.diag() else: return covar", "# Allow for fast variances return exact_prediction_strategies.SGPRPredictionStrategy( train_inputs, train_prior_dist, train_labels,", "DiagLazyTensor(covar_diag) def forward(self, x1, x2, diag=False, **kwargs): covar = self._get_covariance(x1,", "True kernel_inv_root = 
self._cached_kernel_inv_root if hasattr(self, \"_cached_kernel_mat\"): replace_kernel_mat = True", "del self._cached_kernel_mat @property def _inducing_mat(self): if not self.training and hasattr(self,", "torch.triangular_solve(eye, chol)[0] res = inv_root if not self.training: self._cached_kernel_inv_root =", "copy import math import torch from ..distributions import MultivariateNormal from", "return covar def _covar_diag(self, inputs): if inputs.ndimension() == 1: inputs", "self).__init__(active_dims=active_dims) self.base_kernel = base_kernel self.likelihood = likelihood if inducing_points.ndimension() ==", "LowRankRootLazyTensor, MatmulLazyTensor, delazify from ..mlls import InducingPointKernelAddedLossTerm from ..models import", "diag=True) - covar.diag()).clamp(0, math.inf) covar = LowRankRootAddedDiagLazyTensor(covar, DiagLazyTensor(correction)) else: k_ux2", "LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root)) # Diagonal correction for predictive posterior if not self.training:", "MatmulLazyTensor( k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2) ) return covar def _covar_diag(self, inputs):", "if not self.training and hasattr(self, \"_cached_kernel_mat\"): return self._cached_kernel_mat else: res", "= likelihood if inducing_points.ndimension() == 1: inducing_points = inducing_points.unsqueeze(-1) self.register_parameter(name=\"inducing_points\",", "= inv_root if not self.training: self._cached_kernel_inv_root = res return res", "import psd_safe_cholesky from .kernel import Kernel class InducingPointKernel(Kernel): def __init__(self,", "..utils.cholesky import psd_safe_cholesky from .kernel import Kernel class InducingPointKernel(Kernel): def", "if hasattr(self, \"_cached_kernel_mat\"): replace_kernel_mat = True kernel_mat = self._cached_kernel_mat cp", "hasattr(self, \"_cached_kernel_inv_root\"): replace_inv_root = True kernel_inv_root = self._cached_kernel_inv_root if hasattr(self,", "x2): covar = LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root)) # Diagonal correction for predictive posterior", "not self.training and hasattr(self, \"_cached_kernel_inv_root\"): return self._cached_kernel_inv_root else: chol =", "self.training and hasattr(self, \"_cached_kernel_inv_root\"): return self._cached_kernel_inv_root else: chol = psd_safe_cholesky(self._inducing_mat,", "replace_inv_root = False replace_kernel_mat = False if hasattr(self, \"_cached_kernel_inv_root\"): replace_inv_root", "..mlls import InducingPointKernelAddedLossTerm from ..models import exact_prediction_strategies from ..utils.cholesky import", "x2): raise RuntimeError(\"x1 should equal x2 in training mode\") zero_mean", "covar.diag() else: return covar def num_outputs_per_input(self, x1, x2): return self.base_kernel.num_outputs_per_input(x1,", "= inducing_points.unsqueeze(-1) self.register_parameter(name=\"inducing_points\", parameter=torch.nn.Parameter(inducing_points)) self.register_added_loss_term(\"inducing_point_loss_term\") def _clear_cache(self): if hasattr(self, \"_cached_kernel_mat\"):", "self.training: self._cached_kernel_inv_root = res return res def _get_covariance(self, x1, x2):", "from ..lazy import DiagLazyTensor, LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor, MatmulLazyTensor, delazify from ..mlls", "delazify(self.base_kernel(x2, self.inducing_points)) covar = MatmulLazyTensor( k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2) ) return", "1: inducing_points = inducing_points.unsqueeze(-1) 
self.register_parameter(name=\"inducing_points\", parameter=torch.nn.Parameter(inducing_points)) self.register_added_loss_term(\"inducing_point_loss_term\") def _clear_cache(self): if", "LowRankRootAddedDiagLazyTensor(covar, DiagLazyTensor(correction)) else: k_ux2 = delazify(self.base_kernel(x2, self.inducing_points)) covar = MatmulLazyTensor(", "res = delazify(self.base_kernel(self.inducing_points, self.inducing_points)) if not self.training: self._cached_kernel_mat = res", "x1, x2, diag=False, **kwargs): covar = self._get_covariance(x1, x2) if self.training:", "active_dims=None): super(InducingPointKernel, self).__init__(active_dims=active_dims) self.base_kernel = base_kernel self.likelihood = likelihood if", "\"_cached_kernel_inv_root\"): replace_inv_root = True kernel_inv_root = self._cached_kernel_inv_root if hasattr(self, \"_cached_kernel_mat\"):", "res @property def _inducing_inv_root(self): if not self.training and hasattr(self, \"_cached_kernel_inv_root\"):", "num_outputs_per_input(self, x1, x2): return self.base_kernel.num_outputs_per_input(x1, x2) def __deepcopy__(self, memo): replace_inv_root", ") if replace_inv_root: cp._cached_kernel_inv_root = kernel_inv_root if replace_kernel_mat: cp._cached_kernel_mat =", "= self.__class__( base_kernel=copy.deepcopy(self.base_kernel), inducing_points=copy.deepcopy(self.inducing_points), likelihood=self.likelihood, active_dims=self.active_dims, ) if replace_inv_root: cp._cached_kernel_inv_root", "replace_inv_root: cp._cached_kernel_inv_root = kernel_inv_root if replace_kernel_mat: cp._cached_kernel_mat = kernel_mat return", "import torch from ..distributions import MultivariateNormal from ..lazy import DiagLazyTensor,", "x2): return self.base_kernel.num_outputs_per_input(x1, x2) def __deepcopy__(self, memo): replace_inv_root = False", "= InducingPointKernelAddedLossTerm( MultivariateNormal(zero_mean, self._covar_diag(x1)), MultivariateNormal(zero_mean, covar), self.likelihood, ) self.update_added_loss_term(\"inducing_point_loss_term\", new_added_loss_term)", "= MatmulLazyTensor( k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2) ) return covar def _covar_diag(self,", "def forward(self, x1, x2, diag=False, **kwargs): covar = self._get_covariance(x1, x2)", "def num_outputs_per_input(self, x1, x2): return self.base_kernel.num_outputs_per_input(x1, x2) def __deepcopy__(self, memo):", "class InducingPointKernel(Kernel): def __init__(self, base_kernel, inducing_points, likelihood, active_dims=None): super(InducingPointKernel, self).__init__(active_dims=active_dims)", "kernel_inv_root = self._cached_kernel_inv_root if hasattr(self, \"_cached_kernel_mat\"): replace_kernel_mat = True kernel_mat", "= True kernel_mat = self._cached_kernel_mat cp = self.__class__( base_kernel=copy.deepcopy(self.base_kernel), inducing_points=copy.deepcopy(self.inducing_points),", "cp def prediction_strategy(self, train_inputs, train_prior_dist, train_labels, likelihood): # Allow for", "super(InducingPointKernel, self).__init__(active_dims=active_dims) self.base_kernel = base_kernel self.likelihood = likelihood if inducing_points.ndimension()", "training mode\") zero_mean = torch.zeros_like(x1.select(-1, 0)) new_added_loss_term = InducingPointKernelAddedLossTerm( MultivariateNormal(zero_mean,", "= torch.zeros_like(x1.select(-1, 0)) new_added_loss_term = InducingPointKernelAddedLossTerm( MultivariateNormal(zero_mean, self._covar_diag(x1)), MultivariateNormal(zero_mean, covar),", "InducingPointKernelAddedLossTerm from ..models 
import exact_prediction_strategies from ..utils.cholesky import psd_safe_cholesky from", "self.training: if not torch.equal(x1, x2): raise RuntimeError(\"x1 should equal x2", "if not torch.equal(x1, x2): raise RuntimeError(\"x1 should equal x2 in", "\"_cached_kernel_mat\"): del self._cached_kernel_mat @property def _inducing_mat(self): if not self.training and", "replace_kernel_mat = False if hasattr(self, \"_cached_kernel_inv_root\"): replace_inv_root = True kernel_inv_root", "if torch.equal(x1, x2): covar = LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root)) # Diagonal correction for", "return cp def prediction_strategy(self, train_inputs, train_prior_dist, train_labels, likelihood): # Allow", "inputs.ndimension() == 1: inputs = inputs.unsqueeze(1) # Get diagonal of", "self.base_kernel.num_outputs_per_input(x1, x2) def __deepcopy__(self, memo): replace_inv_root = False replace_kernel_mat =", "self.training: correction = (self.base_kernel(x1, x2, diag=True) - covar.diag()).clamp(0, math.inf) covar", "= LowRankRootAddedDiagLazyTensor(covar, DiagLazyTensor(correction)) else: k_ux2 = delazify(self.base_kernel(x2, self.inducing_points)) covar =", "x1, x2): k_ux1 = delazify(self.base_kernel(x1, self.inducing_points)) if torch.equal(x1, x2): covar", "self.inducing_points)) if torch.equal(x1, x2): covar = LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root)) # Diagonal correction", "diag=True)) return DiagLazyTensor(covar_diag) def forward(self, x1, x2, diag=False, **kwargs): covar", "active_dims=self.active_dims, ) if replace_inv_root: cp._cached_kernel_inv_root = kernel_inv_root if replace_kernel_mat: cp._cached_kernel_mat", "psd_safe_cholesky(self._inducing_mat, upper=True) eye = torch.eye(chol.size(-1), device=chol.device, dtype=chol.dtype) inv_root = torch.triangular_solve(eye,", "else: res = delazify(self.base_kernel(self.inducing_points, self.inducing_points)) if not self.training: self._cached_kernel_mat =", "def _clear_cache(self): if hasattr(self, \"_cached_kernel_mat\"): del self._cached_kernel_mat @property def _inducing_mat(self):", "diag=False, **kwargs): covar = self._get_covariance(x1, x2) if self.training: if not", "if hasattr(self, \"_cached_kernel_inv_root\"): replace_inv_root = True kernel_inv_root = self._cached_kernel_inv_root if", "self.inducing_points)) covar = MatmulLazyTensor( k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2) ) return covar", "self.register_added_loss_term(\"inducing_point_loss_term\") def _clear_cache(self): if hasattr(self, \"_cached_kernel_mat\"): del self._cached_kernel_mat @property def", "\"_cached_kernel_mat\"): return self._cached_kernel_mat else: res = delazify(self.base_kernel(self.inducing_points, self.inducing_points)) if not", "= kernel_inv_root if replace_kernel_mat: cp._cached_kernel_mat = kernel_mat return cp def", "predictive posterior if not self.training: correction = (self.base_kernel(x1, x2, diag=True)", "should equal x2 in training mode\") zero_mean = torch.zeros_like(x1.select(-1, 0))", "covar def _covar_diag(self, inputs): if inputs.ndimension() == 1: inputs =", "..distributions import MultivariateNormal from ..lazy import DiagLazyTensor, LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor, MatmulLazyTensor,", "device=chol.device, dtype=chol.dtype) inv_root = torch.triangular_solve(eye, chol)[0] res = inv_root if", "= self._get_covariance(x1, x2) if self.training: if not torch.equal(x1, x2): raise", "x2, diag=False, **kwargs): covar = self._get_covariance(x1, x2) if 
self.training: if", "covar = self._get_covariance(x1, x2) if self.training: if not torch.equal(x1, x2):", "not self.training and hasattr(self, \"_cached_kernel_mat\"): return self._cached_kernel_mat else: res =", "if not self.training: self._cached_kernel_inv_root = res return res def _get_covariance(self,", "= torch.triangular_solve(eye, chol)[0] res = inv_root if not self.training: self._cached_kernel_inv_root", "covar = LowRankRootAddedDiagLazyTensor(covar, DiagLazyTensor(correction)) else: k_ux2 = delazify(self.base_kernel(x2, self.inducing_points)) covar" ]
[ "import os from flask import Flask from flask_cors import CORS", "ma from app.views import albums, artists, hello, tracks def create_app(config,", "db from app.extensions.schema import ma from app.views import albums, artists,", "\"*\"}}) app.config.from_object(config) # app.url_map.strict_slashes = False with app.app_context(): api.init_app(app) db.init_app(app)", "= False with app.app_context(): api.init_app(app) db.init_app(app) db.create_all() ma.init_app(app) api.register_blueprint(hello.blp) api.register_blueprint(artists.blp)", "CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}}) app.config.from_object(config) # app.url_map.strict_slashes = False with", "import CORS from app.extensions import api from app.extensions.database import db", "logging.basicConfig(level=logging.INFO) app = Flask(__name__, **kwargs) CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}}) app.config.from_object(config)", "app.extensions.schema import ma from app.views import albums, artists, hello, tracks", "app.views import albums, artists, hello, tracks def create_app(config, **kwargs): logging.basicConfig(level=logging.INFO)", "with app.app_context(): api.init_app(app) db.init_app(app) db.create_all() ma.init_app(app) api.register_blueprint(hello.blp) api.register_blueprint(artists.blp) api.register_blueprint(albums.blp) api.register_blueprint(tracks.blp)", "tracks def create_app(config, **kwargs): logging.basicConfig(level=logging.INFO) app = Flask(__name__, **kwargs) CORS(app,", "app.extensions.database import db from app.extensions.schema import ma from app.views import", "def create_app(config, **kwargs): logging.basicConfig(level=logging.INFO) app = Flask(__name__, **kwargs) CORS(app, resources={r\"/api/*\":", "create_app(config, **kwargs): logging.basicConfig(level=logging.INFO) app = Flask(__name__, **kwargs) CORS(app, resources={r\"/api/*\": {\"origins\":", "**kwargs): logging.basicConfig(level=logging.INFO) app = Flask(__name__, **kwargs) CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})", "CORS from app.extensions import api from app.extensions.database import db from", "api.init_app(app) db.init_app(app) db.create_all() ma.init_app(app) api.register_blueprint(hello.blp) api.register_blueprint(artists.blp) api.register_blueprint(albums.blp) api.register_blueprint(tracks.blp) try: os.makedirs(app.instance_path)", "import ma from app.views import albums, artists, hello, tracks def", "# app.url_map.strict_slashes = False with app.app_context(): api.init_app(app) db.init_app(app) db.create_all() ma.init_app(app)", "from app.extensions.schema import ma from app.views import albums, artists, hello,", "api from app.extensions.database import db from app.extensions.schema import ma from", "import api from app.extensions.database import db from app.extensions.schema import ma", "import Flask from flask_cors import CORS from app.extensions import api", "from flask import Flask from flask_cors import CORS from app.extensions", "from app.extensions.database import db from app.extensions.schema import ma from app.views", "import albums, artists, hello, tracks def create_app(config, **kwargs): logging.basicConfig(level=logging.INFO) app", "app = Flask(__name__, **kwargs) CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}}) app.config.from_object(config) #", "albums, artists, hello, tracks def create_app(config, **kwargs): logging.basicConfig(level=logging.INFO) app =", "**kwargs) CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}}) app.config.from_object(config) # 
app.url_map.strict_slashes = False", "False with app.app_context(): api.init_app(app) db.init_app(app) db.create_all() ma.init_app(app) api.register_blueprint(hello.blp) api.register_blueprint(artists.blp) api.register_blueprint(albums.blp)", "logging import os from flask import Flask from flask_cors import", "from app.extensions import api from app.extensions.database import db from app.extensions.schema", "db.create_all() ma.init_app(app) api.register_blueprint(hello.blp) api.register_blueprint(artists.blp) api.register_blueprint(albums.blp) api.register_blueprint(tracks.blp) try: os.makedirs(app.instance_path) except OSError:", "flask import Flask from flask_cors import CORS from app.extensions import", "flask_cors import CORS from app.extensions import api from app.extensions.database import", "hello, tracks def create_app(config, **kwargs): logging.basicConfig(level=logging.INFO) app = Flask(__name__, **kwargs)", "api.register_blueprint(hello.blp) api.register_blueprint(artists.blp) api.register_blueprint(albums.blp) api.register_blueprint(tracks.blp) try: os.makedirs(app.instance_path) except OSError: pass return", "= Flask(__name__, **kwargs) CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}}) app.config.from_object(config) # app.url_map.strict_slashes", "import logging import os from flask import Flask from flask_cors", "app.config.from_object(config) # app.url_map.strict_slashes = False with app.app_context(): api.init_app(app) db.init_app(app) db.create_all()", "app.extensions import api from app.extensions.database import db from app.extensions.schema import", "app.app_context(): api.init_app(app) db.init_app(app) db.create_all() ma.init_app(app) api.register_blueprint(hello.blp) api.register_blueprint(artists.blp) api.register_blueprint(albums.blp) api.register_blueprint(tracks.blp) try:", "api.register_blueprint(artists.blp) api.register_blueprint(albums.blp) api.register_blueprint(tracks.blp) try: os.makedirs(app.instance_path) except OSError: pass return app", "{\"origins\": \"*\"}}) app.config.from_object(config) # app.url_map.strict_slashes = False with app.app_context(): api.init_app(app)", "Flask from flask_cors import CORS from app.extensions import api from", "artists, hello, tracks def create_app(config, **kwargs): logging.basicConfig(level=logging.INFO) app = Flask(__name__,", "from app.views import albums, artists, hello, tracks def create_app(config, **kwargs):", "os from flask import Flask from flask_cors import CORS from", "ma.init_app(app) api.register_blueprint(hello.blp) api.register_blueprint(artists.blp) api.register_blueprint(albums.blp) api.register_blueprint(tracks.blp) try: os.makedirs(app.instance_path) except OSError: pass", "Flask(__name__, **kwargs) CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}}) app.config.from_object(config) # app.url_map.strict_slashes =", "app.url_map.strict_slashes = False with app.app_context(): api.init_app(app) db.init_app(app) db.create_all() ma.init_app(app) api.register_blueprint(hello.blp)", "resources={r\"/api/*\": {\"origins\": \"*\"}}) app.config.from_object(config) # app.url_map.strict_slashes = False with app.app_context():", "from flask_cors import CORS from app.extensions import api from app.extensions.database", "db.init_app(app) db.create_all() ma.init_app(app) api.register_blueprint(hello.blp) api.register_blueprint(artists.blp) api.register_blueprint(albums.blp) api.register_blueprint(tracks.blp) try: os.makedirs(app.instance_path) except", "import db from app.extensions.schema import ma from 
app.views import albums," ]
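# A hypothetical entry point (e.g. run.py) showing one way create_app could be
# invoked. "DevConfig" and every setting inside it are assumptions: the API_*
# keys are what a flask-smorest Api object would expect and the SQLAlchemy URI
# is a placeholder; substitute whatever config object the project actually uses.
from app import create_app


class DevConfig:
    API_TITLE = "Example API"                     # assumed flask-smorest setting
    API_VERSION = "v1"                            # assumed flask-smorest setting
    OPENAPI_VERSION = "3.0.2"                     # assumed flask-smorest setting
    SQLALCHEMY_DATABASE_URI = "sqlite:///app.db"  # assumed database location
    SQLALCHEMY_TRACK_MODIFICATIONS = False


app = create_app(DevConfig)

if __name__ == "__main__":
    app.run(debug=True)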
[ "= \"Transaction added successfully\" elif to_loc in [None, '', '", "to_loc)) # IMPORTANT to maintain consistency cursor.execute(\"\"\" UPDATE products SET", "unallocated_quantity) VALUES (%s, %s, %s)\", (prod_name, quantity, quantity)) mysql.connection.commit() except(MySQLdb.Error(not", "machine\": {\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\": 0}, #", "l_id in [x[0] for x in locations]: cursor.execute(\"SELECT loc_name FROM", "@app.route('/product.html', methods=['POST', 'GET']) def product(): init_database() msg=None cursor = mysql.connection.cursor()", "= request.form['from_loc'] to_loc = request.form['to_loc'] quantity = request.form['quantity'] # if", "WHERE prod_id = %s\", (prod_id,)) old_prod_quantity = cursor.fetchone()[0] cursor.execute(\"\"\" UPDATE", "in products]: cursor.execute(\"SELECT prod_name FROM products WHERE prod_id = %s\",", "== 'POST': prod_id = request.form['product_id'] prod_name = request.form['prod_name'] prod_quantity =", "@app.route('/edit', methods=['POST', 'GET']) def edit(): # Try capitalize() type_ =", "def product(): init_database() msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT * from", "', None]: transaction_allowed= True if transaction_allowed: try: cursor.execute(\"INSERT INTO products(prod_name,", "'location' and request.method == 'POST': loc_id = request.form['loc_id'] loc_name =", "cursor = mysql.connection.cursor() cursor.execute(\"SELECT * from products\") products = cursor.fetchall()", "error temp_loc_name = cursor.fetchone() # print(temp_loc_name) - (Andaman,) #e.g. prod_id", "dict(out_place) all_place = {} #Inplace = {1:20, 3:2000} - keys", "\"Summary\", warehouses = warehouse, products = products, database = q_data)", "x in cursor.fetchall()]) # cursor.fetchall -> ((1,)), x -> (1,)", "0), # ('Microwave', 'Andaman', 0), ('Microwave', 'Assam', 0), ('Microwave', 'Jodhpur',", "prod_id FROM products WHERE prod_name = %s\", (prod_name,)) prod_id =", "[None, '', ' ']: print(\"To Location wasn't specified, will be", "cursor.close() @app.route('/') def summary(): init_database() msg = None q_data, warehouse,", "TABLE IF NOT EXISTS location(loc_id integer primary key auto_increment, loc_name", "if msg: print(msg) cursor.close() return redirect(url_for('location')) return render('location.html', link=link, warehouses=warehouse_data,", "3:2000} - keys - prod_id - toloc = mumbai #out_place", "' ', None] and warehouse_name not in loc_new: transaction_allowed=True if", "WHERE loc_name = %s\", (from_loc,)) from_loc = ''.join([str(x[0]) for x", "%s WHERE prod_id = %s \"\"\", (all_place[products_], products_)) cursor.execute(\"DELETE FROM", "unique not null); \"\"\") cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS", "to the previous quantity else: alloc_json[row[0]][row[1]] = row[2] #If no,", "# ('Washing machine', 'Andaman', 0), ('Washing machine', 'Assam', 0), ('Washing", "KEY(from_loc_id) REFERENCES location(loc_id), FOREIGN KEY(to_loc_id) REFERENCES location(loc_id)); \"\"\") mysql.connection.commit() cursor.close()", "('Piano', 'Andaman', 0) #print(log_summary) # [('Piano', 'Andaman', 0), ('Piano', 'Assam',", "msg = f\"{prod_name} added succcessfully\" if msg: print(msg) cursor.close() return", "if request.method == 'POST': # transaction times are stored in", "sum_to_loc = (0,) #how much enters andaman - how much", "cursor.execute(\"SELECT * FROM logistics\") logistics_data = cursor.fetchall() cursor.execute(\"SELECT prod_id, prod_name,", "redirect(url_for('movement')) return render('movement.html', title = 
\"Product Movement\", link=link, trans_message=msg, products=products,", "mysql = MySQL(app) link = {x:x for x in [\"location\",", "xr', 'Assam', 0), ('Iphone xr', 'Jodhpur', 0), ('Iphone xr', 'Puducherry',", "in mumbai which will be unallocated if mumbai is deleted", "(26, 'Jodhpur'), (17, 'Puducherry')) # 20 # 19 # 26", "piano, loc_id = 1 = andaman cursor.execute(\"\"\" SELECT SUM(log.prod_quantity) FROM", "link=link, products = products, transaction_message=msg, title=\"Products Log\") @app.route('/movement.html', methods=['POST', 'GET'])", "= [] for i in range(len(loc_names)): loc_new.append(loc_names[i][0]) cursor.execute(\"SELECT prod_name FROM", "prod_id, SUM(prod_quantity) FROM logistics where to_loc_id = %s GROUP BY", "= cursor.fetchone() # No. of pianos that leave andaman #", "%s\", (prod_name,)) prod_id = ''.join([str(x[0]) for x in cursor.fetchall() ])", "warehouse_name not in ['', ' ', None] and warehouse_name not", "out_place = cursor.fetchall() #Convert list of tuples to dict in_place", "'', ' ']: print(\"To Location wasn't specified, will be unallocated\")", "mumbai if x in out_place.keys(): #calculator left mumbai all_place[x] =", "loc_name = %s WHERE loc_id = %s\", (loc_name, loc_id)) mysql.connection.commit()", "\"\"\", (p_id, l_id)) sum_to_loc = cursor.fetchone() # No.of pianos that", "cursor.execute(\"SELECT loc_id, loc_name FROM location\") locations = cursor.fetchall() # products", "location.loc_id, %s FROM products, location WHERE products.prod_name = %s AND", "added succcessfully\" if msg: print(msg) cursor.close() return redirect(url_for('product')) return render('product.html',", "'to_loc' given the product is being shipped between warehouses else:", "prod_id = ''.join([str(x[0]) for x in cursor.fetchall() ]) cursor.execute(\"\"\" INSERT", "= %s AND location.loc_name = %s \"\"\", (quantity, prod_name, from_loc))", "'Puducherry', 0), # ('Iphone xr', 'Andaman', 0), ('Iphone xr', 'Assam',", "= %s\", (loc_name, loc_id)) mysql.connection.commit() cursor.close() return redirect(url_for('location')) elif type_", "in [x[0] for x in locations]: cursor.execute(\"SELECT loc_name FROM location", "in locations]: cursor.execute(\"SELECT loc_name FROM location WHERE loc_id = %s\",", "quantity, quantity)) mysql.connection.commit() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e: msg =", "products(prod_name, prod_quantity, unallocated_quantity) VALUES (%s, %s, %s)\", (prod_name, quantity, quantity))", "= log_summary) @app.route('/delete') def delete(): # Make sure that the", "not in ['', ' ', None]: transaction_allowed= True if transaction_allowed:", "cls = Encoder) # print(alloc_json) # {\"Piano\": {\"Andaman\": 0, \"Assam\":", "stored in UTC prod_name = request.form['prod_name'] from_loc = request.form['from_loc'] to_loc", "p_id in [x[0] for x in products]: # print(p_id) #", "that the queries are working properly....I'm having some doubts about", "= f\"An error occured: {e}\" print(msg) cursor.close() return render('index.html',link=link, title", "for x in in_place.keys(): #calculator entered mumbai if x in", "link=link, trans_message=msg, products=products, locations=locations, allocated = alloc_json, logs = logistics_data,", "\"\"\", (quantity, prod_name)) mysql.connection.commit() except(MySQLdb.Error, MySQLdb.Warning) as e: msg=f\"An error", "{} #Inplace = {1:20, 3:2000} - keys - prod_id -", "0), ('Washing machine', 'Assam', 0), ('Washing machine', 'Jodhpur', 0), ('Washing", "if Assam, exists in Piano ka keys, etc. 
alloc_json[row[0]][row[1]] +=", "= request.form['product_id'] prod_name = request.form['prod_name'] prod_quantity = request.form['prod_quantity'] prod_name =", "row[2] #Add Andaman with quantity as a new value in", "db['mysql_password'] app.config['MYSQL_DB'] = db['mysql_db'] mysql = MySQL(app) link = {x:x", "enter andaman sum_to_loc = (0,) #how much enters andaman -", "loc_name = loc_name.capitalize() if loc_name not in ['', ' ',", "= %s\", (l_id,)) #str(l_id,) giving an error temp_loc_name = cursor.fetchone()", "def location(): init_database() msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT * FROM", "\"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\": 0}, # \"Iphone xr\": {\"Andaman\":", "quantity)) mysql.connection.commit() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e: msg = f\"An", "= mysql.connection.cursor() cursor.execute(\"SELECT * FROM logistics\") logistics_data = cursor.fetchall() cursor.execute(\"SELECT", "elif to_loc in [None, '', ' ']: print(\"To Location wasn't", "init_database() msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT * FROM logistics\") logistics_data", "unallocated_quantity integer); \"\"\") # Might have to create a trigger,", "that leave andaman # print(sum_from_loc) if sum_from_loc[0] is None: #e.g.", "in the dictionary #print(alloc_json) # {'Piano': {'Andaman': 0, 'Assam': 0,", "%s)\", (prod_name, quantity, quantity)) mysql.connection.commit() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e:", "NULL, from_loc_id INTEGER NULL, to_loc_id INTEGER NULL, prod_quantity INTEGER NOT", "%s WHERE prod_name = %s \"\"\", (quantity, prod_name)) mysql.connection.commit() except", "the flask instance app = Flask(__name__) # Configure the database", "loc_new = [] for i in range(len(loc_names)): loc_new.append(loc_names[i][0]) cursor.execute(\"SELECT prod_name", "mysql.connection.commit() except(MySQLdb.Error, MySQLdb.Warning) as e: msg=f\"An error occurred: {e}\" else:", "products \"\"\") q_data = cursor.fetchall() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e:", "= warehouse_name.capitalize() transaction_allowed = False if warehouse_name not in ['',", "if request.method == 'POST': prod_name = request.form['prod_name'] quantity = request.form['prod_quantity']", "transaction_message=msg, title = \"Warehouse Locations\") @app.route('/product.html', methods=['POST', 'GET']) def product():", "try: cursor.execute(\"INSERT INTO location(loc_name) VALUES(%s)\", (warehouse_name,)) mysql.connection.commit() except(MySQLdb.Error(not Warning), MySQLdb.Warning())", "db = yaml.load(open('db.yaml')) app.config['MYSQL_HOST'] = db['mysql_host'] app.config['MYSQL_USER'] = db['mysql_user'] app.config['MYSQL_PASSWORD']", "prod_quantity, unallocated_quantity) VALUES (%s, %s, %s)\", (prod_name, quantity, quantity)) mysql.connection.commit()", "# x in product - (1, 'Piano', 250) # x[0]", "print(msg) cursor.close() return redirect(url_for('movement')) return render('movement.html', title = \"Product Movement\",", "%s \"\"\", (quantity, prod_name, to_loc)) # IMPORTANT to maintain consistency", "CURRENT_TIMESTAMP, FOREIGN KEY(prod_id) REFERENCES products(prod_id), FOREIGN KEY(from_loc_id) REFERENCES location(loc_id), FOREIGN", "unallocated_quantity + %s - %s WHERE prod_id = %s \"\"\",", "prod_name = %s \"\"\", (quantity, prod_name)) mysql.connection.commit() except (MySQLdb.Error, MySQLdb.Warning)", "[] for p_id in [x[0] for x in products]: cursor.execute(\"SELECT", "successfully\" #Print a transaction message if exists! 
if msg: print(msg)", "cursor.fetchall()]) # cursor.fetchall -> ((1,)), x -> (1,) x[0] ->", "('Piano', 'Puducherry', 0), # ('Iphone xr', 'Andaman', 0), ('Iphone xr',", "# Initialise all tables cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS", "succcessfully\" if msg: print(msg) cursor.close() return redirect(url_for('product')) return render('product.html', link=link,", "cursor.close() return redirect(url_for('product')) return render('product.html', link=link, products = products, transaction_message=msg,", "(quantity, prod_name)) mysql.connection.commit() except (MySQLdb.Error, MySQLdb.Warning) as e: msg =", "((20, 'Andaman'), (19, 'Assam'), (26, 'Jodhpur'), (17, 'Puducherry')) # 20", "INTO logistics(prod_id, to_loc_id, prod_quantity) SELECT products.prod_id, location.loc_id, %s FROM products,", "logistics log WHERE log.prod_id = %s AND log.from_loc_id = %s", "cursor.execute(\"\"\" UPDATE products SET unallocated_quantity = unallocated_quantity + %s WHERE", "{e}\" else: msg = \"Transaction added successfully\" #Print a transaction", "= %s\", (id_,)) mysql.connection.commit() cursor.close() return redirect(url_for('product')) @app.route('/edit', methods=['POST', 'GET'])", "(7, 'Microwave', 50)) # x in product - (1, 'Piano',", "fridges were sent to daman diu, therefore, 1900 remains in", "in loc_new: transaction_allowed=True if transaction_allowed: try: cursor.execute(\"INSERT INTO location(loc_name) VALUES(%s)\",", "('Washing machine', 'Puducherry', 0), # ('Microwave', 'Andaman', 0), ('Microwave', 'Assam',", "VALUES(%s)\", (warehouse_name,)) mysql.connection.commit() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e: msg =", "2 # 6 # 7 # print(locations) # for l_id", "(p_id, l_id)) sum_from_loc = cursor.fetchone() # No. of pianos that", "redirect from flask import render_template as render from flask_mysqldb import", "= %s\", (to_loc,)) to_loc = ''.join([str(x[0]) for x in cursor.fetchall()", "- sum_from_loc[0],)) # (Piano,) + (Andaman,), (0,) = ('Piano', 'Andaman',", "EXISTS products(prod_id integer primary key auto_increment, prod_name varchar(20) UNIQUE NOT", "0), # ('Washing machine', 'Andaman', 0), ('Washing machine', 'Assam', 0),", "cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS location(loc_id integer primary key", "prod_quantity not in ['', ' ', None] and prod_name not", "- (Andaman,) #e.g. 
prod_id = 1 = piano, loc_id =", "from location\") warehouse = cursor.fetchall() cursor.execute(\"Select * from products\") products", "products]: # print(p_id) # 1 # 2 # 6 #", "\"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\": 0}, # \"Washing machine\": {\"Andaman\":", "'POST': prod_id = request.form['product_id'] prod_name = request.form['prod_name'] prod_quantity = request.form['prod_quantity']", "prod_name = %s WHERE prod_id = %s\", (prod_name, str(prod_id))) if", "occured: {e}\" print(msg) cursor.close() return render('index.html',link=link, title = \"Summary\", warehouses", "came to mumbai from kolkata, 100 fridges were sent to", "quantity as a new value in the dictionary #print(alloc_json) #", "== 'location': id_ = request.args.get('loc_id') cursor.execute(\"SELECT prod_id, SUM(prod_quantity) FROM logistics", "location SET loc_name = %s WHERE loc_id = %s\", (loc_name,", "= f\"An error occured: {e}\" else: msg = \"Transaction added", "= request.form['quantity'] # if no 'from loc' is given, that", "(warehouse_name,)) mysql.connection.commit() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e: msg = f\"An", "render('index.html',link=link, title = \"Summary\", warehouses = warehouse, products = products,", "integer); \"\"\") # Might have to create a trigger, let's", "all tables cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS products(prod_id integer", "added successfully\" elif to_loc in [None, '', ' ']: print(\"To", "flask import Flask, url_for, request, redirect from flask import render_template", "new value in the dictionary #print(alloc_json) # {'Piano': {'Andaman': 0,", "('Microwave', 'Assam', 0), ('Microwave', 'Jodhpur', 0), ('Microwave', 'Puducherry', 0)] alloc_json", "# for l_id in [x[0] for x in locations]: #", "warehouse (init condition) if from_loc in [None, '', ' ']:", "None] and loc_name not in loc_new: cursor.execute(\"UPDATE location SET loc_name", "from flask import render_template as render from flask_mysqldb import MySQL", "(0,) = ('Piano', 'Andaman', 0) #print(log_summary) # [('Piano', 'Andaman', 0),", "('Washing machine', 'Assam', 0), ('Washing machine', 'Jodhpur', 0), ('Washing machine',", "in cursor.fetchall() ]) cursor.execute(\"SELECT prod_id FROM products WHERE prod_name =", "msg = \"Transaction added successfully\" #Print a transaction message if", "SELECT SUM(log.prod_quantity) FROM logistics log WHERE log.prod_id = %s AND", "list of tuples to dict in_place = dict(in_place) out_place =", "from_loc_id, prod_quantity) SELECT products.prod_id, location.loc_id, %s FROM products, location WHERE", "loc_id = %s\", (loc_name, loc_id)) mysql.connection.commit() cursor.close() return redirect(url_for('location')) elif", "= mumbai #out_place = {3:100} - keys - prod_id -", "= q_data) @app.route('/location.html', methods=['POST', 'GET']) def location(): init_database() msg=None cursor", "\"\"\", (all_place[products_], products_)) cursor.execute(\"DELETE FROM location where loc_id = %s\",", "0}, # 'Iphone xr': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0,", "%s FROM products, location WHERE products.prod_name = %s AND location.loc_name", "AND location.loc_name = %s \"\"\", (quantity, prod_name, to_loc)) # IMPORTANT", "for p_id in [x[0] for x in products]: # print(p_id)", "wasn't specified, will be unallocated\") try: cursor.execute(\"\"\" INSERT INTO logistics(prod_id,", "= unallocated_quantity + %s - %s WHERE prod_id = %s", "CREATE TABLE IF NOT EXISTS products(prod_id integer primary key auto_increment,", "INTO logistics(prod_id, from_loc_id, prod_quantity) SELECT 
products.prod_id, location.loc_id, %s FROM products,", "AND location.loc_name = %s \"\"\", (quantity, prod_name, from_loc)) #Important to", "request.form['prod_quantity'] prod_name = prod_name.capitalize() if prod_name not in ['', '", "'Andaman', 0), ('Washing machine', 'Assam', 0), ('Washing machine', 'Jodhpur', 0),", "loc_name FROM location\") loc_names = cursor.fetchall() loc_new = [] for", "= mumbai for x in in_place.keys(): #calculator entered mumbai if", "= request.args.get('prod_id') cursor.execute(\"DELETE FROM products WHERE prod_id = %s\", (id_,))", "not in loc_new: cursor.execute(\"UPDATE location SET loc_name = %s WHERE", "(0,) #how much enters andaman - how much leaves andaman", "('Microwave', 'Jodhpur', 0), ('Microwave', 'Puducherry', 0)] alloc_json = {} for", "row[2] #If no, add it as a new quantity except", "GROUP BY prod_id\", (id_,)) in_place = cursor.fetchall() cursor.execute(\"SELECT prod_id, SUM(prod_quantity)", "xr', 'Jodhpur', 0), ('Iphone xr', 'Puducherry', 0), # ('Washing machine',", "instance app = Flask(__name__) # Configure the database db =", "loc_id = %s\", (l_id,)) #str(l_id,) giving an error temp_loc_name =", "cursor.execute(\"INSERT INTO location(loc_name) VALUES(%s)\", (warehouse_name,)) mysql.connection.commit() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as", "%s\", (to_loc,)) to_loc = ''.join([str(x[0]) for x in cursor.fetchall() ])", "50)) # x in product - (1, 'Piano', 250) #", "+ (sum_to_loc[0] - sum_from_loc[0],) )] ORRRRRRRRRRR log_summary.append(temp_prod_name + temp_loc_name +", "varchar(20) unique not null); \"\"\") cursor.execute(\"\"\" CREATE TABLE IF NOT", "\"\"\", (p_id, l_id)) sum_from_loc = cursor.fetchone() # No. of pianos", "from kolkata, 100 fridges were sent to daman diu, therefore,", "cursor = mysql.connection.cursor() cursor.execute(\"SELECT loc_name FROM location\") loc_names = cursor.fetchall()", "0) #print(log_summary) # [('Piano', 'Andaman', 0), ('Piano', 'Assam', 0), ('Piano',", "cursor.execute(\"UPDATE location SET loc_name = %s WHERE loc_id = %s\",", "the quantity to the previous quantity else: alloc_json[row[0]][row[1]] = row[2]", "'location': id_ = request.args.get('loc_id') cursor.execute(\"SELECT prod_id, SUM(prod_quantity) FROM logistics where", "\"movement\"]} link[\"index\"] = '/' def init_database(): cursor = mysql.connection.cursor() #", "'from loc' is given, that means the product is being", "in all_place.keys(): cursor.execute(\"\"\" UPDATE products SET unallocated_quantity = unallocated_quantity +", "Locations\") @app.route('/product.html', methods=['POST', 'GET']) def product(): init_database() msg=None cursor =", "(Andaman,), (0,) = ('Piano', 'Andaman', 0) #print(log_summary) # [('Piano', 'Andaman',", "mumbai from kolkata, 100 fridges were sent to daman diu,", "{'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}, # 'Washing", "#e.g. 
prod_id = 1 = piano, loc_id = 1 =", "to maintain consistency cursor.execute(\"\"\" UPDATE products SET unallocated_quantity = unallocated_quantity", "def init_database(): cursor = mysql.connection.cursor() # Initialise all tables cursor.execute(\"\"\"", "prod_name not in ['', ' ', None] and prod_name not", "= f\"{warehouse_name} added succcessfully\" if msg: print(msg) cursor.close() return redirect(url_for('location'))", "prod_name = %s\", (prod_name,)) prod_id = ''.join([str(x[0]) for x in", "= %s \"\"\", (quantity, prod_name)) mysql.connection.commit() except (MySQLdb.Error, MySQLdb.Warning) as", "#If yes, the add the quantity to the previous quantity", "print(sum_from_loc) if sum_from_loc[0] is None: #e.g. (None,) --> (0,) -->", "id_ = request.args.get('loc_id') cursor.execute(\"SELECT prod_id, SUM(prod_quantity) FROM logistics where to_loc_id", "= f\"An error occured: {e}\" else: msg = f\"{warehouse_name} added", "decimal.Decimal): return str(obj) # Setting up the flask instance app", "* FROM logistics\") logistics_data = cursor.fetchall() cursor.execute(\"SELECT prod_id, prod_name, unallocated_quantity", "7 # print(locations) # for l_id in [x[0] for x", "msg = f\"An error occured: {e}\" else: msg = f\"{prod_name}", "%s \"\"\", (quantity, prod_name)) mysql.connection.commit() except(MySQLdb.Error, MySQLdb.Warning) as e: msg=f\"An", "= db['mysql_db'] mysql = MySQL(app) link = {x:x for x", "WHERE prod_id = %s \"\"\", (all_place[products_], products_)) cursor.execute(\"DELETE FROM location", "alloc_json = {} for row in log_summary: try: if row[1]", "to_loc_id = %s GROUP BY prod_id\", (id_,)) in_place = cursor.fetchall()", "msg = f\"An error occured: {e}\" else: msg = f\"{warehouse_name}", "WHERE products.prod_name = %s AND location.loc_name = %s \"\"\", (quantity,", "logistics where to_loc_id = %s GROUP BY prod_id\", (id_,)) in_place", "Flask, url_for, request, redirect from flask import render_template as render", "if quantity not in ['', ' ', None]: transaction_allowed= True", "shipped to a warehouse (init condition) if from_loc in [None,", "(p_id, l_id)) sum_to_loc = cursor.fetchone() # No.of pianos that enter", "print(l_id) # ((20, 'Andaman'), (19, 'Assam'), (26, 'Jodhpur'), (17, 'Puducherry'))", "prod_id = 1 = piano, loc_id = 1 = andaman", "cursor.execute(\"\"\" UPDATE products SET prod_quantity = %s, unallocated_quantity = unallocated_quantity", "'GET']) def location(): init_database() msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT *", "cursor = mysql.connection.cursor() # Initialise all tables cursor.execute(\"\"\" CREATE TABLE", "msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT * FROM location ORDER BY", "for products_ in all_place.keys(): cursor.execute(\"\"\" UPDATE products SET unallocated_quantity =", "= unallocated_quantity + %s WHERE prod_id = %s \"\"\", (all_place[products_],", "that means the product is being shipped to a warehouse", "* from products\") products = cursor.fetchall() cursor.execute(\"SELECT prod_name FROM products\")", "where to_loc_id = %s GROUP BY prod_id\", (id_,)) in_place =", "\"\"\", (quantity, prod_name)) mysql.connection.commit() except (MySQLdb.Error, MySQLdb.Warning) as e: msg", "render_template as render from flask_mysqldb import MySQL import yaml import", "WHERE prod_id = %s\", (id_,)) mysql.connection.commit() cursor.close() return redirect(url_for('product')) @app.route('/edit',", "0, 'Jodhpur': 0, 'Puducherry': 0}, # 'Washing machine': {'Andaman': 0,", "'Washing machine': {'Andaman': 0, 'Assam': 0, 
'Jodhpur': 0, 'Puducherry': 0},", "= request.form['to_loc'] quantity = request.form['quantity'] # if no 'from loc'", "(quantity, prod_name, from_loc)) #Important to maintain consistency cursor.execute(\"\"\" UPDATE products", "'Puducherry': 0}} alloc_json = json.dumps(alloc_json, cls = Encoder) # print(alloc_json)", "= db['mysql_user'] app.config['MYSQL_PASSWORD'] = db['mysql_password'] app.config['MYSQL_DB'] = db['mysql_db'] mysql =", "VALUES(%s, %s, %s, %s) \"\"\", (prod_id, from_loc, to_loc, quantity)) mysql.connection.commit()", "prod_name)) mysql.connection.commit() except(MySQLdb.Error, MySQLdb.Warning) as e: msg=f\"An error occurred: {e}\"", "null, unallocated_quantity integer); \"\"\") # Might have to create a", "= mysql.connection.cursor() try: cursor.execute(\"Select * from location\") warehouse = cursor.fetchall()", "null); \"\"\") cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS logistics(trans_id integer", "('Washing machine', 'Jodhpur', 0), ('Washing machine', 'Puducherry', 0), # ('Microwave',", "import MySQL import yaml import json import MySQLdb import decimal", "unallocated_quantity = unallocated_quantity + %s WHERE prod_name = %s \"\"\",", "[] for i in range(len(prod_names)): prod_new.append(prod_names[i][0]) if type_ == 'location'", "if mumbai is deleted else: all_place[x] = in_place[x] for products_", "(MySQLdb.Error, MySQLdb.Warning) as e: msg = f\"An error occured: {e}\"", ")] ORRRRRRRRRRR log_summary.append(temp_prod_name + temp_loc_name + (sum_to_loc[0] - sum_from_loc[0],)) #", "trans_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY(prod_id) REFERENCES products(prod_id),", "'Assam', 0), ('Washing machine', 'Jodhpur', 0), ('Washing machine', 'Puducherry', 0),", "times are stored in UTC prod_name = request.form['prod_name'] from_loc =", "(id_,)) out_place = cursor.fetchall() #Convert list of tuples to dict", "100), (7, 'Microwave', 50)) # x in product - (1,", "= %s AND log.to_loc_id = %s \"\"\", (p_id, l_id)) sum_to_loc", "in cursor.fetchall() ]) cursor.execute(\"\"\" INSERT INTO logistics(prod_id, from_loc_id, to_loc_id, prod_quantity)", "def default(self, obj): if isinstance(obj, decimal.Decimal): return str(obj) # Setting", "Piano ka keys, Check if Assam, exists in Piano ka", "primary key auto_increment, prod_name varchar(20) UNIQUE NOT NULL, prod_quantity integer", "the product is being shipped to a warehouse (init condition)", "doubts about the datatypes type_ = request.args.get('type') cursor = mysql.connection.cursor()", "methods=['POST', 'GET']) def movement(): init_database() msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT", "WHERE prod_id = %s\", (prod_name, str(prod_id))) if prod_quantity not in", "Configure the database db = yaml.load(open('db.yaml')) app.config['MYSQL_HOST'] = db['mysql_host'] app.config['MYSQL_USER']", "f\"An error occured: {e}\" else: msg = \"Transaction added successfully\"", "AND log.to_loc_id = %s \"\"\", (p_id, l_id)) sum_to_loc = cursor.fetchone()", "request.form['prod_name'] from_loc = request.form['from_loc'] to_loc = request.form['to_loc'] quantity = request.form['quantity']", "logistics where from_loc_id = %s GROUP BY prod_id\", (id_,)) out_place", "= cursor.fetchall() prod_new = [] for i in range(len(prod_names)): prod_new.append(prod_names[i][0])", "return redirect(url_for('movement')) return render('movement.html', title = \"Product Movement\", link=link, trans_message=msg,", "- ((1, 'Piano', 250), (2, 'Iphone xr', 600), (6, 'Washing", "sum_from_loc[0] is None: #e.g. 
(None,) --> (0,) --> No pianos", "to_loc in [None, '', ' ']: print(\"To Location wasn't specified,", "None: #e.g. (None,) --> (0,) --> No pianos leave andaman", "cursor.fetchall() cursor.execute(\"SELECT prod_id, SUM(prod_quantity) FROM logistics where from_loc_id = %s", "json import MySQLdb import decimal class Encoder(json.JSONEncoder): def default(self, obj):", "= '/' def init_database(): cursor = mysql.connection.cursor() # Initialise all", "will be unallocated\") try: cursor.execute(\"\"\" INSERT INTO logistics(prod_id, from_loc_id, prod_quantity)", "'GET']) def edit(): # Try capitalize() type_ = request.args.get('type') cursor", "how much remains (allocated) in andaman # log_summary += [(temp_prod_name", "import decimal class Encoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, decimal.Decimal):", "row[2] #If yes, the add the quantity to the previous", "0}} alloc_json = json.dumps(alloc_json, cls = Encoder) # print(alloc_json) #", "request.args.get('prod_id') cursor.execute(\"DELETE FROM products WHERE prod_id = %s\", (id_,)) mysql.connection.commit()", "'Jodhpur'), (17, 'Puducherry')) # 20 # 19 # 26 #", "* from products\") products = cursor.fetchall() cursor.execute(\"\"\" SELECT prod_name, unallocated_quantity,", "FROM logistics where to_loc_id = %s GROUP BY prod_id\", (id_,))", "MySQLdb.Warning) as e: msg=f\"An error occurred: {e}\" else: msg =", "varchar(20) UNIQUE NOT NULL, prod_quantity integer not null, unallocated_quantity integer);", "x in cursor.fetchall() ]) cursor.execute(\"SELECT prod_id FROM products WHERE prod_name", "%s\", (from_loc,)) from_loc = ''.join([str(x[0]) for x in cursor.fetchall()]) #", "ORDER BY loc_id\") warehouse_data = cursor.fetchall() cursor.execute(\"SELECT loc_name FROM location\")", "20 # 19 # 26 # 17 log_summary = []", "loc_new: transaction_allowed=True if transaction_allowed: try: cursor.execute(\"INSERT INTO location(loc_name) VALUES(%s)\", (warehouse_name,))", "sum_to_loc = cursor.fetchone() # No.of pianos that enter andaman cursor.execute(\"\"\"", "WHERE loc_id = %s\", (l_id,)) #str(l_id,) giving an error temp_loc_name", "given the product is being shipped between warehouses else: try:", "Check if Assam, exists in Piano ka keys, etc. 
alloc_json[row[0]][row[1]]", "cursor.execute(\"\"\" INSERT INTO logistics(prod_id, from_loc_id, to_loc_id, prod_quantity) VALUES(%s, %s, %s,", "in range(len(prod_names)): prod_new.append(prod_names[i][0]) if request.method == 'POST': prod_name = request.form['prod_name']", "%s\", (prod_id,)) old_prod_quantity = cursor.fetchone()[0] cursor.execute(\"\"\" UPDATE products SET prod_quantity", "'product': id_ = request.args.get('prod_id') cursor.execute(\"DELETE FROM products WHERE prod_id =", "locations]: # print(l_id) # ((20, 'Andaman'), (19, 'Assam'), (26, 'Jodhpur'),", "Movement\", link=link, trans_message=msg, products=products, locations=locations, allocated = alloc_json, logs =", "%s WHERE prod_name = %s \"\"\", (quantity, prod_name)) mysql.connection.commit() except(MySQLdb.Error,", "= cursor.fetchall() cursor.execute(\"SELECT prod_name FROM products\") prod_names = cursor.fetchall() prod_new", "ORRRRRRRRRRR log_summary.append(temp_prod_name + temp_loc_name + (sum_to_loc[0] - sum_from_loc[0],)) # (Piano,)", "= request.form['loc_name'] loc_name = loc_name.capitalize() if loc_name not in ['',", "in range(len(loc_names)): loc_new.append(loc_names[i][0]) if request.method == 'POST': warehouse_name = request.form['warehouse_name']", "' ']: try: cursor.execute(\"\"\" INSERT INTO logistics(prod_id, to_loc_id, prod_quantity) SELECT", "queries are working properly....I'm having some doubts about the datatypes", "= %s \"\"\", (p_id, l_id)) sum_to_loc = cursor.fetchone() # No.of", "andaman sum_to_loc = (0,) #how much enters andaman - how", "x -> (1,) x[0] -> 1 join converts 1 into", "successfully\" # if 'from loc' and 'to_loc' given the product", "cursor.execute(\"SELECT prod_id, SUM(prod_quantity) FROM logistics where to_loc_id = %s GROUP", "(0,) if sum_to_loc[0] is None: #No pianos enter andaman sum_to_loc", "= ('Piano', 'Andaman', 0) #print(log_summary) # [('Piano', 'Andaman', 0), ('Piano',", "x in products]: cursor.execute(\"SELECT prod_name FROM products WHERE prod_id =", "else: all_place[x] = in_place[x] for products_ in all_place.keys(): cursor.execute(\"\"\" UPDATE", "msg: print(msg) cursor.close() return redirect(url_for('product')) return render('product.html', link=link, products =", "being shipped between warehouses else: try: cursor.execute(\"SELECT loc_id FROM location", "quantity not in ['', ' ', None]: transaction_allowed= True if", "'Jodhpur', 0), ('Microwave', 'Puducherry', 0)] alloc_json = {} for row", "from_loc in [None, '', ' ']: try: cursor.execute(\"\"\" INSERT INTO", "products_)) cursor.execute(\"DELETE FROM location where loc_id = %s\", (id_,)) mysql.connection.commit()", "# Setting up the flask instance app = Flask(__name__) #", "prod_id, prod_name, unallocated_quantity FROM products\") products = cursor.fetchall() cursor.execute(\"SELECT loc_id,", "# ('Iphone xr', 'Andaman', 0), ('Iphone xr', 'Assam', 0), ('Iphone", "xr', 'Andaman', 0), ('Iphone xr', 'Assam', 0), ('Iphone xr', 'Jodhpur',", "delete(): # Make sure that the queries are working properly....I'm", "key auto_increment, prod_name varchar(20) UNIQUE NOT NULL, prod_quantity integer not", "f\"{prod_name} added succcessfully\" if msg: print(msg) cursor.close() return redirect(url_for('product')) return", "msg: print(msg) cursor.close() return redirect(url_for('location')) return render('location.html', link=link, warehouses=warehouse_data, transaction_message=msg,", "No. 
of pianos that leave andaman # print(sum_from_loc) if sum_from_loc[0]", "#out_place = {3:100} - keys - prod_id - fromloc =", "added successfully\" # if 'from loc' and 'to_loc' given the", "prod_names = cursor.fetchall() prod_new = [] for i in range(len(prod_names)):", "FROM location where loc_id = %s\", (id_,)) mysql.connection.commit() cursor.close() return", "'Andaman'), (19, 'Assam'), (26, 'Jodhpur'), (17, 'Puducherry')) # 20 #", "x[0] = 1 # for p_id in [x[0] for x", "products.prod_id, location.loc_id, %s FROM products, location WHERE products.prod_name = %s", "UTC prod_name = request.form['prod_name'] from_loc = request.form['from_loc'] to_loc = request.form['to_loc']", "\"Transaction added successfully\" #Print a transaction message if exists! if", "prod_id = %s\", (id_,)) mysql.connection.commit() cursor.close() return redirect(url_for('product')) @app.route('/edit', methods=['POST',", "[] for i in range(len(loc_names)): loc_new.append(loc_names[i][0]) if request.method == 'POST':", "init_database(): cursor = mysql.connection.cursor() # Initialise all tables cursor.execute(\"\"\" CREATE", "NOT EXISTS location(loc_id integer primary key auto_increment, loc_name varchar(20) unique", "NULL, prod_quantity INTEGER NOT NULL, trans_time TIMESTAMP NOT NULL DEFAULT", "print(msg) cursor.close() return render('index.html',link=link, title = \"Summary\", warehouses = warehouse,", "(prod_name,)) prod_id = ''.join([str(x[0]) for x in cursor.fetchall() ]) cursor.execute(\"\"\"", "integer primary key auto_increment, prod_name varchar(20) UNIQUE NOT NULL, prod_quantity", "['', ' ', None] and loc_name not in loc_new: cursor.execute(\"UPDATE", "try: cursor.execute(\"\"\" INSERT INTO logistics(prod_id, from_loc_id, prod_quantity) SELECT products.prod_id, location.loc_id,", "redirect(url_for('location')) return render('location.html', link=link, warehouses=warehouse_data, transaction_message=msg, title = \"Warehouse Locations\")", "andaman # log_summary += [(temp_prod_name + temp_loc_name + (sum_to_loc[0] -", "'Andaman', 0), ('Iphone xr', 'Assam', 0), ('Iphone xr', 'Jodhpur', 0),", "quantity except (KeyError, TypeError): alloc_json[row[0]] = {} #Make the value", "None q_data, warehouse, products = None, None, None cursor =", "mysql.connection.cursor() if type_ == 'location': id_ = request.args.get('loc_id') cursor.execute(\"SELECT prod_id,", "request.method == 'POST': prod_id = request.form['product_id'] prod_name = request.form['prod_name'] prod_quantity", "= cursor.fetchall() # products - ((1, 'Piano', 250), (2, 'Iphone", "cursor.close() return redirect(url_for('location')) elif type_ == 'product': id_ = request.args.get('prod_id')", "redirect(url_for('product')) @app.route('/edit', methods=['POST', 'GET']) def edit(): # Try capitalize() type_", "= {} for row in log_summary: try: if row[1] in", "if exists! if msg: print(msg) cursor.close() return redirect(url_for('movement')) return render('movement.html',", "type_ == 'product' and request.method == 'POST': prod_id = request.form['product_id']", "= 1 = andaman cursor.execute(\"\"\" SELECT SUM(log.prod_quantity) FROM logistics log", "FROM location\") locations = cursor.fetchall() # products - ((1, 'Piano',", "if prod_name not in ['', ' ', None] and prod_name", "%s GROUP BY prod_id\", (id_,)) out_place = cursor.fetchall() #Convert list", "q_data, warehouse, products = None, None, None cursor = mysql.connection.cursor()", "in Piano ka keys, etc. 
alloc_json[row[0]][row[1]] += row[2] #If yes,", "in ['', ' ', None] and prod_name not in prod_new:", "products = products, database = q_data) @app.route('/location.html', methods=['POST', 'GET']) def", "up the flask instance app = Flask(__name__) # Configure the", "\"product\", \"movement\"]} link[\"index\"] = '/' def init_database(): cursor = mysql.connection.cursor()", "shipped between warehouses else: try: cursor.execute(\"SELECT loc_id FROM location WHERE", "in locations]: # print(l_id) # ((20, 'Andaman'), (19, 'Assam'), (26,", "prod_name varchar(20) UNIQUE NOT NULL, prod_quantity integer not null, unallocated_quantity", "= False if prod_name not in ['', ' ', None]", "tables cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS products(prod_id integer primary", "== 'location' and request.method == 'POST': loc_id = request.form['loc_id'] loc_name", "previous quantity else: alloc_json[row[0]][row[1]] = row[2] #If no, add it", "prod_quantity = request.form['prod_quantity'] prod_name = prod_name.capitalize() if prod_name not in", "26 # 17 log_summary = [] for p_id in [x[0]", "1900 remains in mumbai which will be unallocated if mumbai", "logistics(trans_id integer primary key auto_increment, prod_id INTEGER NOT NULL, from_loc_id", "loc_name varchar(20) unique not null); \"\"\") cursor.execute(\"\"\" CREATE TABLE IF", "prod_new.append(prod_names[i][0]) if request.method == 'POST': prod_name = request.form['prod_name'] quantity =", "else: msg = f\"{prod_name} added succcessfully\" if msg: print(msg) cursor.close()", "= [] for i in range(len(prod_names)): prod_new.append(prod_names[i][0]) if request.method ==", "add the quantity to the previous quantity else: alloc_json[row[0]][row[1]] =", "* from location\") warehouse = cursor.fetchall() cursor.execute(\"Select * from products\")", "prod_new: cursor.execute(\"UPDATE products SET prod_name = %s WHERE prod_id =", "products SET unallocated_quantity = unallocated_quantity - %s WHERE prod_name =", "location(loc_name) VALUES(%s)\", (warehouse_name,)) mysql.connection.commit() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e: msg", "0}, # \"Iphone xr\": {\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\": 0,", "= f\"An error occured: {e}\" else: msg = f\"{prod_name} added", "if msg: print(msg) cursor.close() return redirect(url_for('movement')) return render('movement.html', title =", "prod_name = request.form['prod_name'] quantity = request.form['prod_quantity'] prod_name = prod_name.capitalize() transaction_allowed", "(prod_id, from_loc, to_loc, quantity)) mysql.connection.commit() except(MySQLdb.Error, MySQLdb.Warning) as e: msg=f\"An", "alloc_json[row[0]].keys(): #Check if Andaman exists in Piano ka keys, Check", "to create a trigger, let's see! 
cursor.execute(\"\"\" CREATE TABLE IF", "locations]: cursor.execute(\"SELECT loc_name FROM location WHERE loc_id = %s\", (l_id,))", "('Piano', 'Assam', 0), ('Piano', 'Jodhpur', 0), ('Piano', 'Puducherry', 0), #", "for x in locations]: cursor.execute(\"SELECT loc_name FROM location WHERE loc_id", "unallocated_quantity = unallocated_quantity + %s WHERE prod_id = %s \"\"\",", "#If no, add it as a new quantity except (KeyError,", "pianos that enter andaman cursor.execute(\"\"\" SELECT SUM(log.prod_quantity) FROM logistics log", "%s, %s) \"\"\", (prod_id, from_loc, to_loc, quantity)) mysql.connection.commit() except(MySQLdb.Error, MySQLdb.Warning)", "', None] and prod_name not in prod_new: cursor.execute(\"SELECT prod_quantity FROM", "prod_quantity FROM products WHERE prod_id = %s\", (prod_id,)) old_prod_quantity =", "db['mysql_user'] app.config['MYSQL_PASSWORD'] = db['mysql_password'] app.config['MYSQL_DB'] = db['mysql_db'] mysql = MySQL(app)", "render('location.html', link=link, warehouses=warehouse_data, transaction_message=msg, title = \"Warehouse Locations\") @app.route('/product.html', methods=['POST',", "('Iphone xr', 'Puducherry', 0), # ('Washing machine', 'Andaman', 0), ('Washing", "#Add Andaman with quantity as a new value in the", "+ %s - %s WHERE prod_id = %s \"\"\", (prod_quantity,", "left mumbai all_place[x] = in_place[x] - out_place[x] #2000 fridges came", "# No.of pianos that enter andaman cursor.execute(\"\"\" SELECT SUM(log.prod_quantity) FROM", "and loc_name not in loc_new: cursor.execute(\"UPDATE location SET loc_name =", "SET unallocated_quantity = unallocated_quantity + %s WHERE prod_name = %s", "cursor.execute(\"DELETE FROM products WHERE prod_id = %s\", (id_,)) mysql.connection.commit() cursor.close()", "error occurred: {e}\" else: msg = \"Transaction added successfully\" #", "it as a new quantity except (KeyError, TypeError): alloc_json[row[0]] =", "warehouse, products = products, database = q_data) @app.route('/location.html', methods=['POST', 'GET'])", "@app.route('/delete') def delete(): # Make sure that the queries are", "loc_name not in loc_new: cursor.execute(\"UPDATE location SET loc_name = %s", "INTO logistics(prod_id, from_loc_id, to_loc_id, prod_quantity) VALUES(%s, %s, %s, %s) \"\"\",", "= %s AND location.loc_name = %s \"\"\", (quantity, prod_name, to_loc))", "mysql.connection.commit() cursor.close() return redirect(url_for('location')) elif type_ == 'product' and request.method", "= 1 = piano, loc_id = 1 = andaman cursor.execute(\"\"\"", "0, \"Puducherry\": 0}, # \"Microwave\": {\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\":", "UNIQUE NOT NULL, prod_quantity integer not null, unallocated_quantity integer); \"\"\")", "loc_name.capitalize() if loc_name not in ['', ' ', None] and", "{\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\": 0}, # \"Microwave\":", "a new quantity except (KeyError, TypeError): alloc_json[row[0]] = {} #Make", "{e}\" else: msg = f\"{warehouse_name} added succcessfully\" if msg: print(msg)", "is None: #No pianos enter andaman sum_to_loc = (0,) #how", "mysql.connection.commit() cursor.close() return redirect(url_for('location')) elif type_ == 'product': id_ =", "(prod_id,)) old_prod_quantity = cursor.fetchone()[0] cursor.execute(\"\"\" UPDATE products SET prod_quantity =", "error occured: {e}\" else: msg = f\"{warehouse_name} added succcessfully\" if", "0), ('Iphone xr', 'Assam', 0), ('Iphone xr', 'Jodhpur', 0), ('Iphone", "loc' is given, that means the product is being shipped", "the product is being shipped between warehouses else: 
try: cursor.execute(\"SELECT", "FROM location WHERE loc_id = %s\", (l_id,)) #str(l_id,) giving an", "log.prod_id = %s AND log.to_loc_id = %s \"\"\", (p_id, l_id))", "(quantity, prod_name, to_loc)) # IMPORTANT to maintain consistency cursor.execute(\"\"\" UPDATE", "{'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}} alloc_json =", "f\"An error occured: {e}\" else: msg = f\"{prod_name} added succcessfully\"", "datatypes type_ = request.args.get('type') cursor = mysql.connection.cursor() if type_ ==", "- fromloc = mumbai for x in in_place.keys(): #calculator entered", "\"\"\", (quantity, prod_name, to_loc)) # IMPORTANT to maintain consistency cursor.execute(\"\"\"", "= {} #Make the value of piano empty alloc_json[row[0]][row[1]] =", "%s \"\"\", (p_id, l_id)) sum_from_loc = cursor.fetchone() # No. of", "in [x[0] for x in products]: cursor.execute(\"SELECT prod_name FROM products", "unallocated_quantity, prod_quantity FROM products \"\"\") q_data = cursor.fetchall() except(MySQLdb.Error(not Warning),", "SET prod_quantity = %s, unallocated_quantity = unallocated_quantity + %s -", "as render from flask_mysqldb import MySQL import yaml import json", "import render_template as render from flask_mysqldb import MySQL import yaml", "products\") products = cursor.fetchall() cursor.execute(\"SELECT prod_name FROM products\") prod_names =", "to_loc_id INTEGER NULL, prod_quantity INTEGER NOT NULL, trans_time TIMESTAMP NOT", "where loc_id = %s\", (id_,)) mysql.connection.commit() cursor.close() return redirect(url_for('location')) elif", "{\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\": 0}, # \"Washing", "return render('product.html', link=link, products = products, transaction_message=msg, title=\"Products Log\") @app.route('/movement.html',", "cursor.fetchall() cursor.execute(\"SELECT loc_id, loc_name FROM location\") locations = cursor.fetchall() #", "request.form['prod_name'] prod_quantity = request.form['prod_quantity'] prod_name = prod_name.capitalize() if prod_name not", "cursor.execute(\"SELECT prod_id, SUM(prod_quantity) FROM logistics where from_loc_id = %s GROUP", "= mysql.connection.cursor() if type_ == 'location': id_ = request.args.get('loc_id') cursor.execute(\"SELECT", "= %s WHERE prod_id = %s\", (prod_name, str(prod_id))) if prod_quantity", "NULL, prod_quantity integer not null, unallocated_quantity integer); \"\"\") # Might", "pianos enter andaman sum_to_loc = (0,) #how much enters andaman", "''.join([str(x[0]) for x in cursor.fetchall() ]) cursor.execute(\"SELECT prod_id FROM products", "not in prod_new: if quantity not in ['', ' ',", "products\") prod_names = cursor.fetchall() prod_new = [] for i in", "machine', 100), (7, 'Microwave', 50)) # x in product -", "= cursor.fetchall() cursor.execute(\"SELECT prod_id, SUM(prod_quantity) FROM logistics where from_loc_id =", "title = \"Warehouse Locations\") @app.route('/product.html', methods=['POST', 'GET']) def product(): init_database()", "database = log_summary) @app.route('/delete') def delete(): # Make sure that", "product(): init_database() msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT * from products\")", "Andaman exists in Piano ka keys, Check if Assam, exists", "i in range(len(prod_names)): prod_new.append(prod_names[i][0]) if request.method == 'POST': prod_name =", "SET unallocated_quantity = unallocated_quantity - %s WHERE prod_name = %s", "= unallocated_quantity - %s WHERE prod_name = %s \"\"\", (quantity,", "request.form['prod_name'] quantity = request.form['prod_quantity'] prod_name = 
prod_name.capitalize() transaction_allowed = False", "1 = piano, loc_id = 1 = andaman cursor.execute(\"\"\" SELECT", "= andaman cursor.execute(\"\"\" SELECT SUM(log.prod_quantity) FROM logistics log WHERE log.prod_id", "let's see! cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS location(loc_id integer", "(prod_name, quantity, quantity)) mysql.connection.commit() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e: msg", "19 # 26 # 17 log_summary = [] for p_id", "INTEGER NOT NULL, from_loc_id INTEGER NULL, to_loc_id INTEGER NULL, prod_quantity", "decimal class Encoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, decimal.Decimal): return", "request.args.get('loc_id') cursor.execute(\"SELECT prod_id, SUM(prod_quantity) FROM logistics where to_loc_id = %s", "msg = f\"{warehouse_name} added succcessfully\" if msg: print(msg) cursor.close() return", "for x in cursor.fetchall()]) # cursor.fetchall -> ((1,)), x ->", "KEY(to_loc_id) REFERENCES location(loc_id)); \"\"\") mysql.connection.commit() cursor.close() @app.route('/') def summary(): init_database()", "prod_name = request.form['prod_name'] from_loc = request.form['from_loc'] to_loc = request.form['to_loc'] quantity", "methods=['POST', 'GET']) def product(): init_database() msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT", "\"Puducherry\": 0}} if request.method == 'POST': # transaction times are", "prod_new = [] for i in range(len(prod_names)): prod_new.append(prod_names[i][0]) if request.method", "are stored in UTC prod_name = request.form['prod_name'] from_loc = request.form['from_loc']", "- how much leaves andaman = how much remains (allocated)", "location where loc_id = %s\", (id_,)) mysql.connection.commit() cursor.close() return redirect(url_for('location'))", "NULL DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY(prod_id) REFERENCES products(prod_id), FOREIGN KEY(from_loc_id) REFERENCES", "str(p_id,)) temp_prod_name = cursor.fetchone() #print(temp_prod_name) ('Piano',) for l_id in [x[0]", "0)] alloc_json = {} for row in log_summary: try: if", "= cursor.fetchall() cursor.execute(\"SELECT loc_name FROM location\") loc_names = cursor.fetchall() loc_new", "database = q_data) @app.route('/location.html', methods=['POST', 'GET']) def location(): init_database() msg=None", "str(prod_id))) if prod_quantity not in ['', ' ', None] and", "\"Jodhpur\": 0, \"Puducherry\": 0}, # \"Microwave\": {\"Andaman\": 0, \"Assam\": 0,", "FROM location WHERE loc_name = %s\", (to_loc,)) to_loc = ''.join([str(x[0])", "0, \"Puducherry\": 0}, # \"Washing machine\": {\"Andaman\": 0, \"Assam\": 0,", "# Might have to create a trigger, let's see! 
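# The configuration above reads db.yaml from the working directory. A minimal
# sketch of the expected file (the keys are the ones read above; the values
# are placeholders, not real credentials):
#
#   mysql_host: 'localhost'
#   mysql_user: 'root'
#   mysql_password: 'secret'
#   mysql_db: 'inventory'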
cursor.execute(\"\"\"", "products\") products = cursor.fetchall() cursor.execute(\"SELECT loc_id, loc_name FROM location\") locations", "loc_new.append(loc_names[i][0]) if request.method == 'POST': warehouse_name = request.form['warehouse_name'] warehouse_name =", "= logistics_data, database = log_summary) @app.route('/delete') def delete(): # Make", "range(len(prod_names)): prod_new.append(prod_names[i][0]) if type_ == 'location' and request.method == 'POST':", "(17, 'Puducherry')) # 20 # 19 # 26 # 17", "for l_id in [x[0] for x in locations]: # print(l_id)", "redirect(url_for('location')) elif type_ == 'product' and request.method == 'POST': prod_id", "else: msg = f\"{warehouse_name} added succcessfully\" if msg: print(msg) cursor.close()", "transaction_allowed=True if transaction_allowed: try: cursor.execute(\"INSERT INTO location(loc_name) VALUES(%s)\", (warehouse_name,)) mysql.connection.commit()", "occured: {e}\" else: msg = f\"{prod_name} added succcessfully\" if msg:", "as e: msg=f\"An error occurred: {e}\" else: msg = \"Transaction", "Warning), MySQLdb.Warning()) as e: msg = f\"An error occured: {e}\"", "1 # 2 # 6 # 7 # print(locations) #", "x in locations]: # print(l_id) # ((20, 'Andaman'), (19, 'Assam'),", "WHERE loc_name = %s\", (to_loc,)) to_loc = ''.join([str(x[0]) for x", "in_place = dict(in_place) out_place = dict(out_place) all_place = {} #Inplace", "%s\", (id_,)) mysql.connection.commit() cursor.close() return redirect(url_for('location')) elif type_ == 'product':", "db['mysql_db'] mysql = MySQL(app) link = {x:x for x in", "FROM logistics log WHERE log.prod_id = %s AND log.from_loc_id =", "title = \"Product Movement\", link=link, trans_message=msg, products=products, locations=locations, allocated =", "e: msg = f\"An error occured: {e}\" else: msg =", "#Check if Andaman exists in Piano ka keys, Check if", "andaman = how much remains (allocated) in andaman # log_summary", "to_loc = request.form['to_loc'] quantity = request.form['quantity'] # if no 'from", "init_database() msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT * FROM location ORDER", "= None q_data, warehouse, products = None, None, None cursor", "prod_id = %s\", (prod_name, str(prod_id))) if prod_quantity not in ['',", "an error temp_loc_name = cursor.fetchone() # print(temp_loc_name) - (Andaman,) #e.g.", "#print(temp_prod_name) ('Piano',) for l_id in [x[0] for x in locations]:", "mumbai #out_place = {3:100} - keys - prod_id - fromloc", "cursor.execute(\"SELECT loc_name FROM location\") loc_names = cursor.fetchall() loc_new = []", "\"Jodhpur\": 0, \"Puducherry\": 0}} if request.method == 'POST': # transaction", "loc_name not in ['', ' ', None] and loc_name not", "[x[0] for x in locations]: # print(l_id) # ((20, 'Andaman'),", "warehouse_data = cursor.fetchall() cursor.execute(\"SELECT loc_name FROM location\") loc_names = cursor.fetchall()", "is being shipped to a warehouse (init condition) if from_loc", "l_id)) sum_to_loc = cursor.fetchone() # No.of pianos that enter andaman", "some doubts about the datatypes type_ = request.args.get('type') cursor =", "None]: transaction_allowed= True if transaction_allowed: try: cursor.execute(\"INSERT INTO products(prod_name, prod_quantity,", "products WHERE prod_id = %s\", str(p_id,)) temp_prod_name = cursor.fetchone() #print(temp_prod_name)", "giving an error temp_loc_name = cursor.fetchone() # print(temp_loc_name) - (Andaman,)", "temp_loc_name = cursor.fetchone() # print(temp_loc_name) - (Andaman,) #e.g. 
prod_id =", "FROM products WHERE prod_name = %s\", (prod_name,)) prod_id = ''.join([str(x[0])", "products SET prod_quantity = %s, unallocated_quantity = unallocated_quantity + %s", "to dict in_place = dict(in_place) out_place = dict(out_place) all_place =", "try: cursor.execute(\"\"\" INSERT INTO logistics(prod_id, to_loc_id, prod_quantity) SELECT products.prod_id, location.loc_id,", "for i in range(len(loc_names)): loc_new.append(loc_names[i][0]) cursor.execute(\"SELECT prod_name FROM products\") prod_names", "- keys - prod_id - toloc = mumbai #out_place =", "location(loc_id), FOREIGN KEY(to_loc_id) REFERENCES location(loc_id)); \"\"\") mysql.connection.commit() cursor.close() @app.route('/') def", "Log\") @app.route('/movement.html', methods=['POST', 'GET']) def movement(): init_database() msg=None cursor =", "andaman cursor.execute(\"\"\" SELECT SUM(log.prod_quantity) FROM logistics log WHERE log.prod_id =", "logistics(prod_id, from_loc_id, to_loc_id, prod_quantity) VALUES(%s, %s, %s, %s) \"\"\", (prod_id,", "'Iphone xr': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0},", "app.config['MYSQL_DB'] = db['mysql_db'] mysql = MySQL(app) link = {x:x for", "leaves andaman = how much remains (allocated) in andaman #", "REFERENCES products(prod_id), FOREIGN KEY(from_loc_id) REFERENCES location(loc_id), FOREIGN KEY(to_loc_id) REFERENCES location(loc_id));", "machine', 'Puducherry', 0), # ('Microwave', 'Andaman', 0), ('Microwave', 'Assam', 0),", "x in [\"location\", \"product\", \"movement\"]} link[\"index\"] = '/' def init_database():", "succcessfully\" if msg: print(msg) cursor.close() return redirect(url_for('location')) return render('location.html', link=link,", "warehouses = warehouse, products = products, database = q_data) @app.route('/location.html',", "if from_loc in [None, '', ' ']: try: cursor.execute(\"\"\" INSERT", "if loc_name not in ['', ' ', None] and loc_name", "loc_name FROM location WHERE loc_id = %s\", (l_id,)) #str(l_id,) giving", "= %s \"\"\", (quantity, prod_name, from_loc)) #Important to maintain consistency", "cursor.execute(\"SELECT prod_id FROM products WHERE prod_name = %s\", (prod_name,)) prod_id", "msg=f\"An error occurred: {e}\" else: msg = \"Transaction added successfully\"", "fridges came to mumbai from kolkata, 100 fridges were sent", "False if warehouse_name not in ['', ' ', None] and", "mysql.connection.cursor() cursor.execute(\"SELECT * from products\") products = cursor.fetchall() cursor.execute(\"SELECT prod_name", "' ', None] and prod_name not in prod_new: cursor.execute(\"SELECT prod_quantity", "def movement(): init_database() msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT * FROM", "No pianos leave andaman sum_from_loc = (0,) if sum_to_loc[0] is", "'Puducherry': 0}, # 'Iphone xr': {'Andaman': 0, 'Assam': 0, 'Jodhpur':", "remains in mumbai which will be unallocated if mumbai is", "if msg: print(msg) cursor.close() return redirect(url_for('product')) return render('product.html', link=link, products", "quantity = request.form['prod_quantity'] prod_name = prod_name.capitalize() transaction_allowed = False if", "# 'Iphone xr': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry':", "Andaman with quantity as a new value in the dictionary", "and prod_name not in prod_new: cursor.execute(\"UPDATE products SET prod_name =", "if request.method == 'POST': warehouse_name = request.form['warehouse_name'] warehouse_name = warehouse_name.capitalize()", "\"\"\") mysql.connection.commit() cursor.close() @app.route('/') def summary(): 
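# How the three tables relate: every stock movement is one logistics row that
# points at a product and at an optional source/destination location (a NULL
# from_loc_id means the stock came from the unallocated pool). An illustrative
# row, with made-up IDs (trans_time defaults to CURRENT_TIMESTAMP):
#
#   INSERT INTO logistics(prod_id, from_loc_id, to_loc_id, prod_quantity)
#   VALUES (1, 2, 3, 20);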
@app.route('/')
def summary():
    init_database()
    msg = None
    q_data, warehouse, products = None, None, None
    cursor = mysql.connection.cursor()
    try:
        cursor.execute("SELECT * FROM location")
        warehouse = cursor.fetchall()
        cursor.execute("SELECT * FROM products")
        products = cursor.fetchall()
        cursor.execute("SELECT prod_name, unallocated_quantity, prod_quantity FROM products")
        q_data = cursor.fetchall()
    except (MySQLdb.Error, MySQLdb.Warning) as e:
        msg = f"An error occurred: {e}"
        print(msg)
    cursor.close()
    return render('index.html', link=link, title="Summary",
                  warehouses=warehouse, products=products, database=q_data)
cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS location(loc_id", "\"Microwave\": {\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\": 0}} if", "- toloc = mumbai #out_place = {3:100} - keys -", "#No pianos enter andaman sum_to_loc = (0,) #how much enters", "'Microwave': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}} alloc_json", "in prod_new: if quantity not in ['', ' ', None]:", "None cursor = mysql.connection.cursor() try: cursor.execute(\"Select * from location\") warehouse", "alloc_json[row[0]][row[1]] = row[2] #Add Andaman with quantity as a new", "title = \"Summary\", warehouses = warehouse, products = products, database", "['', ' ', None]: transaction_allowed= True if transaction_allowed: try: cursor.execute(\"INSERT", "= %s \"\"\", (quantity, prod_name, to_loc)) # IMPORTANT to maintain", "for x in cursor.fetchall() ]) cursor.execute(\"\"\" INSERT INTO logistics(prod_id, from_loc_id,", "in ['', ' ', None] and loc_name not in loc_new:", "# \"Iphone xr\": {\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\":", "much leaves andaman = how much remains (allocated) in andaman", "VALUES (%s, %s, %s)\", (prod_name, quantity, quantity)) mysql.connection.commit() except(MySQLdb.Error(not Warning),", "= db['mysql_host'] app.config['MYSQL_USER'] = db['mysql_user'] app.config['MYSQL_PASSWORD'] = db['mysql_password'] app.config['MYSQL_DB'] =", "# 6 # 7 # print(locations) # for l_id in", "KEY(prod_id) REFERENCES products(prod_id), FOREIGN KEY(from_loc_id) REFERENCES location(loc_id), FOREIGN KEY(to_loc_id) REFERENCES", "keys - prod_id - fromloc = mumbai for x in", "products, transaction_message=msg, title=\"Products Log\") @app.route('/movement.html', methods=['POST', 'GET']) def movement(): init_database()", "# IMPORTANT to maintain consistency cursor.execute(\"\"\" UPDATE products SET unallocated_quantity", "products]: cursor.execute(\"SELECT prod_name FROM products WHERE prod_id = %s\", str(p_id,))", "loc_id FROM location WHERE loc_name = %s\", (to_loc,)) to_loc =", "[('Piano', 'Andaman', 0), ('Piano', 'Assam', 0), ('Piano', 'Jodhpur', 0), ('Piano',", "]) cursor.execute(\"\"\" INSERT INTO logistics(prod_id, from_loc_id, to_loc_id, prod_quantity) VALUES(%s, %s,", "0), ('Microwave', 'Jodhpur', 0), ('Microwave', 'Puducherry', 0)] alloc_json = {}", "REFERENCES location(loc_id)); \"\"\") mysql.connection.commit() cursor.close() @app.route('/') def summary(): init_database() msg", "\"Iphone xr\": {\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\": 0},", "= %s WHERE loc_id = %s\", (loc_name, loc_id)) mysql.connection.commit() cursor.close()", "#2000 fridges came to mumbai from kolkata, 100 fridges were", "of piano empty alloc_json[row[0]][row[1]] = row[2] #Add Andaman with quantity", "error occurred: {e}\" else: msg = \"Transaction added successfully\" #Print", "in_place[x] - out_place[x] #2000 fridges came to mumbai from kolkata,", "= unallocated_quantity + %s WHERE prod_name = %s \"\"\", (quantity,", "cursor.execute(\"Select * from products\") products = cursor.fetchall() cursor.execute(\"\"\" SELECT prod_name,", "-> 1 join converts 1 into a string cursor.execute(\"SELECT loc_id", "\"Product Movement\", link=link, trans_message=msg, products=products, locations=locations, allocated = alloc_json, logs", "Flask(__name__) # Configure the database db = yaml.load(open('db.yaml')) app.config['MYSQL_HOST'] =", "location WHERE loc_id = %s\", (l_id,)) #str(l_id,) giving an error", "the add the quantity to the previous quantity else: alloc_json[row[0]][row[1]]", 
"Encoder) # print(alloc_json) # {\"Piano\": {\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\":", "cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS logistics(trans_id integer primary key", "cursor.fetchone() # No.of pianos that enter andaman cursor.execute(\"\"\" SELECT SUM(log.prod_quantity)", "[] for i in range(len(loc_names)): loc_new.append(loc_names[i][0]) cursor.execute(\"SELECT prod_name FROM products\")", "loc_name = request.form['loc_name'] loc_name = loc_name.capitalize() if loc_name not in", "if transaction_allowed: try: cursor.execute(\"INSERT INTO products(prod_name, prod_quantity, unallocated_quantity) VALUES (%s,", "# cursor.fetchall -> ((1,)), x -> (1,) x[0] -> 1", "successfully\" elif to_loc in [None, '', ' ']: print(\"To Location", "\"\"\") # Might have to create a trigger, let's see!", "INTEGER NULL, to_loc_id INTEGER NULL, prod_quantity INTEGER NOT NULL, trans_time", "unallocated_quantity = unallocated_quantity - %s WHERE prod_name = %s \"\"\",", "elif type_ == 'product' and request.method == 'POST': prod_id =", "and warehouse_name not in loc_new: transaction_allowed=True if transaction_allowed: try: cursor.execute(\"INSERT", "else: msg = \"Transaction added successfully\" # if 'from loc'", "REFERENCES location(loc_id), FOREIGN KEY(to_loc_id) REFERENCES location(loc_id)); \"\"\") mysql.connection.commit() cursor.close() @app.route('/')", "if sum_from_loc[0] is None: #e.g. (None,) --> (0,) --> No", "\"Puducherry\": 0}, # \"Washing machine\": {\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\":", "IMPORTANT to maintain consistency cursor.execute(\"\"\" UPDATE products SET unallocated_quantity =", "loc_name = %s\", (from_loc,)) from_loc = ''.join([str(x[0]) for x in", "FOREIGN KEY(from_loc_id) REFERENCES location(loc_id), FOREIGN KEY(to_loc_id) REFERENCES location(loc_id)); \"\"\") mysql.connection.commit()", "cursor = mysql.connection.cursor() try: cursor.execute(\"Select * from location\") warehouse =", "'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}, # 'Washing machine': {'Andaman':", "means the product is being shipped to a warehouse (init", "= how much remains (allocated) in andaman # log_summary +=", "for i in range(len(prod_names)): prod_new.append(prod_names[i][0]) if type_ == 'location' and", "= cursor.fetchone() # print(temp_loc_name) - (Andaman,) #e.g. prod_id = 1", "q_data) @app.route('/location.html', methods=['POST', 'GET']) def location(): init_database() msg=None cursor =", "be unallocated if mumbai is deleted else: all_place[x] = in_place[x]", "(Andaman,) #e.g. 
prod_id = 1 = piano, loc_id = 1", "0), ('Piano', 'Puducherry', 0), # ('Iphone xr', 'Andaman', 0), ('Iphone", "0, 'Jodhpur': 0, 'Puducherry': 0}, # 'Microwave': {'Andaman': 0, 'Assam':", "all_place = {} #Inplace = {1:20, 3:2000} - keys -", "'', ' ']: try: cursor.execute(\"\"\" INSERT INTO logistics(prod_id, to_loc_id, prod_quantity)", "'Jodhpur', 0), ('Piano', 'Puducherry', 0), # ('Iphone xr', 'Andaman', 0),", "dict in_place = dict(in_place) out_place = dict(out_place) all_place = {}", "prod_new: if quantity not in ['', ' ', None]: transaction_allowed=", "flask instance app = Flask(__name__) # Configure the database db", "cursor.execute(\"\"\" UPDATE products SET unallocated_quantity = unallocated_quantity - %s WHERE", "= in_place[x] for products_ in all_place.keys(): cursor.execute(\"\"\" UPDATE products SET", "# {'Piano': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0},", "loc_id = 1 = andaman cursor.execute(\"\"\" SELECT SUM(log.prod_quantity) FROM logistics", "import json import MySQLdb import decimal class Encoder(json.JSONEncoder): def default(self,", "elif type_ == 'product': id_ = request.args.get('prod_id') cursor.execute(\"DELETE FROM products", "== 'POST': loc_id = request.form['loc_id'] loc_name = request.form['loc_name'] loc_name =", "# print(temp_loc_name) - (Andaman,) #e.g. prod_id = 1 = piano,", "- sum_from_loc[0],) )] ORRRRRRRRRRR log_summary.append(temp_prod_name + temp_loc_name + (sum_to_loc[0] -", "FROM products\") products = cursor.fetchall() cursor.execute(\"SELECT loc_id, loc_name FROM location\")", "leave andaman sum_from_loc = (0,) if sum_to_loc[0] is None: #No", "in cursor.fetchall()]) # cursor.fetchall -> ((1,)), x -> (1,) x[0]", "prod_id = %s \"\"\", (prod_quantity, prod_quantity, old_prod_quantity, str(prod_id))) mysql.connection.commit() cursor.close()", "cursor.execute(\"SELECT prod_quantity FROM products WHERE prod_id = %s\", (prod_id,)) old_prod_quantity", "['', ' ', None] and prod_name not in prod_new: if", "unallocated_quantity FROM products\") products = cursor.fetchall() cursor.execute(\"SELECT loc_id, loc_name FROM", "i in range(len(loc_names)): loc_new.append(loc_names[i][0]) if request.method == 'POST': warehouse_name =", "in [\"location\", \"product\", \"movement\"]} link[\"index\"] = '/' def init_database(): cursor", "return redirect(url_for('location')) return render('location.html', link=link, warehouses=warehouse_data, transaction_message=msg, title = \"Warehouse", "location WHERE loc_name = %s\", (to_loc,)) to_loc = ''.join([str(x[0]) for", "xr': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}, #", "#Convert list of tuples to dict in_place = dict(in_place) out_place", "sure that the queries are working properly....I'm having some doubts", "FOREIGN KEY(prod_id) REFERENCES products(prod_id), FOREIGN KEY(from_loc_id) REFERENCES location(loc_id), FOREIGN KEY(to_loc_id)", "if isinstance(obj, decimal.Decimal): return str(obj) # Setting up the flask", "a warehouse (init condition) if from_loc in [None, '', '", "redirect(url_for('product')) return render('product.html', link=link, products = products, transaction_message=msg, title=\"Products Log\")", "cursor = mysql.connection.cursor() cursor.execute(\"SELECT * FROM location ORDER BY loc_id\")", "log WHERE log.prod_id = %s AND log.from_loc_id = %s \"\"\",", "else: alloc_json[row[0]][row[1]] = row[2] #If no, add it as a", "mysql.connection.cursor() # Initialise all tables cursor.execute(\"\"\" CREATE TABLE IF NOT", "'Microwave', 50)) # x in product - (1, 'Piano', 250)", "about the 
@app.route('/product.html', methods=['POST', 'GET'])
def product():
    init_database()
    msg = None
    cursor = mysql.connection.cursor()
    cursor.execute("SELECT * FROM products")
    products = cursor.fetchall()
    cursor.execute("SELECT prod_name FROM products")
    prod_names = cursor.fetchall()
    prod_new = [row[0] for row in prod_names]

    if request.method == 'POST':
        prod_name = request.form['prod_name'].capitalize()
        quantity = request.form['prod_quantity']
        transaction_allowed = False
        if prod_name not in ['', ' ', None] and prod_name not in prod_new:
            if quantity not in ['', ' ', None]:
                transaction_allowed = True
        if transaction_allowed:
            try:
                # A new product starts out fully unallocated
                cursor.execute(
                    "INSERT INTO products(prod_name, prod_quantity, unallocated_quantity) "
                    "VALUES (%s, %s, %s)", (prod_name, quantity, quantity))
                mysql.connection.commit()
            except (MySQLdb.Error, MySQLdb.Warning) as e:
                msg = f"An error occurred: {e}"
            else:
                msg = f"{prod_name} added successfully"
        if msg:
            print(msg)
        cursor.close()
        return redirect(url_for('product'))

    return render('product.html', link=link, products=products,
                  transaction_message=msg, title="Products Log")
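# Note on the duplicate check used by location() and product(): capitalize()
# normalises the submitted name before the membership test, so variants of the
# same name collapse together:
#
#   >>> 'mumbai'.capitalize()
#   'Mumbai'
#
# Only names that pass both the emptiness check and the membership check
# against the existing rows are inserted.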
@app.route('/movement.html', methods=['POST', 'GET'])
def movement():
    init_database()
    msg = None
    cursor = mysql.connection.cursor()
    cursor.execute("SELECT * FROM logistics")
    logistics_data = cursor.fetchall()
    cursor.execute("SELECT prod_id, prod_name, unallocated_quantity FROM products")
    products = cursor.fetchall()
    cursor.execute("SELECT loc_id, loc_name FROM location")
    locations = cursor.fetchall()
    # products  e.g. ((1, 'Piano', 250), (2, 'Iphone xr', 600), (6, 'Washing machine', 100), (7, 'Microwave', 50))
    # locations e.g. ((20, 'Andaman'), (19, 'Assam'), (26, 'Jodhpur'), (17, 'Puducherry'))

    # Per-(product, location) allocation totals
    log_summary = []
    for p_id in [x[0] for x in products]:
        # pass the parameter as a tuple; str(p_id,) gives an error
        cursor.execute("SELECT prod_name FROM products WHERE prod_id = %s", (p_id,))
        temp_prod_name = cursor.fetchone()   # e.g. ('Piano',)
        for l_id in [x[0] for x in locations]:
            cursor.execute("SELECT loc_name FROM location WHERE loc_id = %s", (l_id,))
            temp_loc_name = cursor.fetchone()   # e.g. ('Andaman',)
            # e.g. prod_id 1 = piano, loc_id 1 = Andaman:
            # number of pianos that enter Andaman
            cursor.execute("""
                SELECT SUM(log.prod_quantity) FROM logistics log
                WHERE log.prod_id = %s AND log.to_loc_id = %s
            """, (p_id, l_id))
            sum_to_loc = cursor.fetchone()
            # number of pianos that leave Andaman
            cursor.execute("""
                SELECT SUM(log.prod_quantity) FROM logistics log
                WHERE log.prod_id = %s AND log.from_loc_id = %s
            """, (p_id, l_id))
            sum_from_loc = cursor.fetchone()
            if sum_from_loc[0] is None:   # (None,) -> (0,): no pianos leave Andaman
                sum_from_loc = (0,)
            if sum_to_loc[0] is None:     # no pianos enter Andaman
                sum_to_loc = (0,)
            # what enters minus what leaves = what remains (allocated) at the location
            log_summary.append(temp_prod_name + temp_loc_name + (sum_to_loc[0] - sum_from_loc[0],))
            # ('Piano',) + ('Andaman',) + (0,) = ('Piano', 'Andaman', 0)
            # (equivalently: log_summary += [temp_prod_name + temp_loc_name + (...)])

    # Fold the flat summary into {product: {location: quantity}}
    alloc_json = {}
    for row in log_summary:
        try:
            if row[1] in alloc_json[row[0]].keys():
                # the location already exists under this product: accumulate
                alloc_json[row[0]][row[1]] += row[2]
            else:
                # new location under an existing product
                alloc_json[row[0]][row[1]] = row[2]
        except (KeyError, TypeError):
            # first time we see this product: start a fresh dict for it
            alloc_json[row[0]] = {}
            alloc_json[row[0]][row[1]] = row[2]
    alloc_json = json.dumps(alloc_json, cls=Encoder)

    if request.method == 'POST':
        # transaction times are stored in UTC
        prod_name = request.form['prod_name']
        from_loc = request.form['from_loc']
        to_loc = request.form['to_loc']
        quantity = request.form['quantity']

        # If no 'from' location is given, the product is being shipped
        # to a warehouse for the first time (init condition)
        if from_loc in [None, '', ' ']:
            try:
                cursor.execute("""
                    INSERT INTO logistics(prod_id, to_loc_id, prod_quantity)
                    SELECT products.prod_id, location.loc_id, %s
                    FROM products, location
                    WHERE products.prod_name = %s AND location.loc_name = %s
                """, (quantity, prod_name, to_loc))
                # IMPORTANT to maintain consistency
                cursor.execute("""
                    UPDATE products
                    SET unallocated_quantity = unallocated_quantity - %s
                    WHERE prod_name = %s
                """, (quantity, prod_name))
                mysql.connection.commit()
            except (MySQLdb.Error, MySQLdb.Warning) as e:
                msg = f"An error occurred: {e}"
            else:
                msg = "Transaction added successfully"

        elif to_loc in [None, '', ' ']:
            print("To Location wasn't specified, will be unallocated")
            try:
                cursor.execute("""
                    INSERT INTO logistics(prod_id, from_loc_id, prod_quantity)
                    SELECT products.prod_id, location.loc_id, %s
                    FROM products, location
                    WHERE products.prod_name = %s AND location.loc_name = %s
                """, (quantity, prod_name, from_loc))
                # Important to maintain consistency
                cursor.execute("""
                    UPDATE products
                    SET unallocated_quantity = unallocated_quantity + %s
                    WHERE prod_name = %s
                """, (quantity, prod_name))
                mysql.connection.commit()
            except (MySQLdb.Error, MySQLdb.Warning) as e:
                msg = f"An error occurred: {e}"
            else:
                msg = "Transaction added successfully"

        # If both 'from loc' and 'to_loc' are given, the product is being
        # shipped between warehouses
        else:
            try:
                cursor.execute("SELECT loc_id FROM location WHERE loc_name = %s", (from_loc,))
                from_loc = ''.join([str(x[0]) for x in cursor.fetchall()])
                # cursor.fetchall() -> ((1,),); x -> (1,); x[0] -> 1; join turns 1 into a string
                cursor.execute("SELECT loc_id FROM location WHERE loc_name = %s", (to_loc,))
                to_loc = ''.join([str(x[0]) for x in cursor.fetchall()])
                cursor.execute("SELECT prod_id FROM products WHERE prod_name = %s", (prod_name,))
                prod_id = ''.join([str(x[0]) for x in cursor.fetchall()])
                cursor.execute("""
                    INSERT INTO logistics(prod_id, from_loc_id, to_loc_id, prod_quantity)
                    VALUES(%s, %s, %s, %s)
                """, (prod_id, from_loc, to_loc, quantity))
                mysql.connection.commit()
            except (MySQLdb.Error, MySQLdb.Warning) as e:
                msg = f"An error occurred: {e}"
            else:
                msg = "Transaction added successfully"

        # Print a transaction message if one exists
        if msg:
            print(msg)
        cursor.close()
        return redirect(url_for('movement'))

    return render('movement.html', title="Product Movement", link=link,
                  trans_message=msg, products=products, locations=locations,
                  allocated=alloc_json, logs=logistics_data, database=log_summary)
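# Worked example of the fold above, using the sample data from the comments:
# log_summary rows such as ('Piano', 'Andaman', 0), ('Piano', 'Assam', 0), ...
# collapse into one nested dict per product:
#
#   {'Piano': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0},
#    'Iphone xr': {...}, 'Washing machine': {...}, 'Microwave': {...}}
#
# json.dumps(..., cls=Encoder) then stringifies any Decimal produced by SUM(),
# e.g. Decimal('20') becomes "20", so the result is safe to embed in a template.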
@app.route('/delete')
def delete():
    # Make sure the queries work properly; I still have some doubts about the datatypes
    type_ = request.args.get('type')
    cursor = mysql.connection.cursor()
    if type_ == 'location':
        id_ = request.args.get('loc_id')
        cursor.execute("SELECT prod_id, SUM(prod_quantity) FROM logistics "
                       "WHERE to_loc_id = %s GROUP BY prod_id", (id_,))
        in_place = cursor.fetchall()
        cursor.execute("SELECT prod_id, SUM(prod_quantity) FROM logistics "
                       "WHERE from_loc_id = %s GROUP BY prod_id", (id_,))
        out_place = cursor.fetchall()

        # Convert the lists of tuples to dicts keyed by prod_id
        in_place = dict(in_place)    # e.g. {1: 20, 3: 2000} - quantities shipped TO this location
        out_place = dict(out_place)  # e.g. {3: 100}         - quantities shipped FROM this location
        all_place = {}
        for x in in_place.keys():        # stock that entered this location
            if x in out_place.keys():    # some of it left again
                all_place[x] = in_place[x] - out_place[x]
                # 2000 fridges came to Mumbai from Kolkata and 100 were sent on to
                # Daman & Diu, so 1900 remain in Mumbai and become unallocated
                # if Mumbai is deleted
            else:
                all_place[x] = in_place[x]
        for products_ in all_place.keys():
            cursor.execute("""
                UPDATE products
                SET unallocated_quantity = unallocated_quantity + %s
                WHERE prod_id = %s
            """, (all_place[products_], products_))
        cursor.execute("DELETE FROM location WHERE loc_id = %s", (id_,))
        mysql.connection.commit()
        cursor.close()
        return redirect(url_for('location'))

    elif type_ == 'product':
        id_ = request.args.get('prod_id')
        cursor.execute("DELETE FROM products WHERE prod_id = %s", (id_,))
        mysql.connection.commit()
        cursor.close()
        return redirect(url_for('product'))
alloc_json[row[0]][row[1]] += row[2] #If", "summary(): init_database() msg = None q_data, warehouse, products = None,", "WHERE log.prod_id = %s AND log.from_loc_id = %s \"\"\", (p_id,", "FROM products \"\"\") q_data = cursor.fetchall() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as", "SUM(log.prod_quantity) FROM logistics log WHERE log.prod_id = %s AND log.from_loc_id", "andaman - how much leaves andaman = how much remains", "transaction message if exists! if msg: print(msg) cursor.close() return redirect(url_for('movement'))", "x in locations]: cursor.execute(\"SELECT loc_name FROM location WHERE loc_id =", "in prod_new: cursor.execute(\"UPDATE products SET prod_name = %s WHERE prod_id", "TABLE IF NOT EXISTS products(prod_id integer primary key auto_increment, prod_name", "return render('movement.html', title = \"Product Movement\", link=link, trans_message=msg, products=products, locations=locations,", "250) # x[0] = 1 # for p_id in [x[0]", "msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT * FROM logistics\") logistics_data =", "return render('location.html', link=link, warehouses=warehouse_data, transaction_message=msg, title = \"Warehouse Locations\") @app.route('/product.html',", "else: msg = \"Transaction added successfully\" elif to_loc in [None,", "cursor.execute(\"SELECT prod_name FROM products\") prod_names = cursor.fetchall() prod_new = []", "None] and prod_name not in prod_new: cursor.execute(\"UPDATE products SET prod_name", "from_loc)) #Important to maintain consistency cursor.execute(\"\"\" UPDATE products SET unallocated_quantity", "GROUP BY prod_id\", (id_,)) out_place = cursor.fetchall() #Convert list of", "render('movement.html', title = \"Product Movement\", link=link, trans_message=msg, products=products, locations=locations, allocated", "# print(locations) # for l_id in [x[0] for x in", "(6, 'Washing machine', 100), (7, 'Microwave', 50)) # x in", "('Piano',) for l_id in [x[0] for x in locations]: cursor.execute(\"SELECT", "alloc_json[row[0]] = {} #Make the value of piano empty alloc_json[row[0]][row[1]]", "not in prod_new: cursor.execute(\"UPDATE products SET prod_name = %s WHERE", "%s\", (loc_name, loc_id)) mysql.connection.commit() cursor.close() return redirect(url_for('location')) elif type_ ==", "== 'product': id_ = request.args.get('prod_id') cursor.execute(\"DELETE FROM products WHERE prod_id", "not null, unallocated_quantity integer); \"\"\") # Might have to create", "+= [(temp_prod_name + temp_loc_name + (sum_to_loc[0] - sum_from_loc[0],) )] ORRRRRRRRRRR", "loc_new.append(loc_names[i][0]) cursor.execute(\"SELECT prod_name FROM products\") prod_names = cursor.fetchall() prod_new =", "from flask import Flask, url_for, request, redirect from flask import", "prod_name.capitalize() transaction_allowed = False if prod_name not in ['', '", "out_place = dict(out_place) all_place = {} #Inplace = {1:20, 3:2000}", "0, 'Puducherry': 0}, # 'Iphone xr': {'Andaman': 0, 'Assam': 0,", "logistics log WHERE log.prod_id = %s AND log.to_loc_id = %s", "= piano, loc_id = 1 = andaman cursor.execute(\"\"\" SELECT SUM(log.prod_quantity)", "#print(log_summary) # [('Piano', 'Andaman', 0), ('Piano', 'Assam', 0), ('Piano', 'Jodhpur',", "EXISTS logistics(trans_id integer primary key auto_increment, prod_id INTEGER NOT NULL,", "# transaction times are stored in UTC prod_name = request.form['prod_name']", "= in_place[x] - out_place[x] #2000 fridges came to mumbai from", "NOT NULL DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY(prod_id) REFERENCES products(prod_id), 
FOREIGN KEY(from_loc_id)", "as e: msg = f\"An error occured: {e}\" else: msg", "prod_quantity integer not null, unallocated_quantity integer); \"\"\") # Might have", "class Encoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, decimal.Decimal): return str(obj)", "loc' and 'to_loc' given the product is being shipped between", "exists! if msg: print(msg) cursor.close() return redirect(url_for('movement')) return render('movement.html', title", "# [('Piano', 'Andaman', 0), ('Piano', 'Assam', 0), ('Piano', 'Jodhpur', 0),", "cursor.fetchall() loc_new = [] for i in range(len(loc_names)): loc_new.append(loc_names[i][0]) cursor.execute(\"SELECT", "%s) \"\"\", (prod_id, from_loc, to_loc, quantity)) mysql.connection.commit() except(MySQLdb.Error, MySQLdb.Warning) as", "EXISTS location(loc_id integer primary key auto_increment, loc_name varchar(20) unique not", "try: cursor.execute(\"SELECT loc_id FROM location WHERE loc_name = %s\", (from_loc,))", "in Piano ka keys, Check if Assam, exists in Piano", "100 fridges were sent to daman diu, therefore, 1900 remains", "- keys - prod_id - fromloc = mumbai for x", "= dict(in_place) out_place = dict(out_place) all_place = {} #Inplace =", "250), (2, 'Iphone xr', 600), (6, 'Washing machine', 100), (7,", "= request.args.get('type') cursor = mysql.connection.cursor() if type_ == 'location': id_", "for i in range(len(prod_names)): prod_new.append(prod_names[i][0]) if request.method == 'POST': prod_name", "# Try capitalize() type_ = request.args.get('type') cursor = mysql.connection.cursor() cursor.execute(\"SELECT", "('Microwave', 'Andaman', 0), ('Microwave', 'Assam', 0), ('Microwave', 'Jodhpur', 0), ('Microwave',", "= {} #Inplace = {1:20, 3:2000} - keys - prod_id", "FROM products WHERE prod_id = %s\", str(p_id,)) temp_prod_name = cursor.fetchone()", "msg = f\"An error occured: {e}\" else: msg = \"Transaction", "# Make sure that the queries are working properly....I'm having", "consistency cursor.execute(\"\"\" UPDATE products SET unallocated_quantity = unallocated_quantity - %s", "0, 'Puducherry': 0}, # 'Washing machine': {'Andaman': 0, 'Assam': 0,", "= Encoder) # print(alloc_json) # {\"Piano\": {\"Andaman\": 0, \"Assam\": 0,", "= cursor.fetchall() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e: msg = f\"An", "prod_name.capitalize() if prod_name not in ['', ' ', None] and", "import Flask, url_for, request, redirect from flask import render_template as", "#Important to maintain consistency cursor.execute(\"\"\" UPDATE products SET unallocated_quantity =", "loc_new: cursor.execute(\"UPDATE location SET loc_name = %s WHERE loc_id =", "logistics_data, database = log_summary) @app.route('/delete') def delete(): # Make sure", "cursor = mysql.connection.cursor() cursor.execute(\"SELECT * FROM logistics\") logistics_data = cursor.fetchall()", "cursor.close() return redirect(url_for('product')) @app.route('/edit', methods=['POST', 'GET']) def edit(): # Try", "= %s\", (prod_name,)) prod_id = ''.join([str(x[0]) for x in cursor.fetchall()", "mysql.connection.commit() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e: msg = f\"An error", "\"Jodhpur\": 0, \"Puducherry\": 0}, # \"Iphone xr\": {\"Andaman\": 0, \"Assam\":", "<gh_stars>0 from flask import Flask, url_for, request, redirect from flask", "ka keys, Check if Assam, exists in Piano ka keys,", "integer not null, unallocated_quantity integer); \"\"\") # Might have to", "= ''.join([str(x[0]) for x in cursor.fetchall()]) # cursor.fetchall -> ((1,)),", "loc_name FROM location\") locations 
= cursor.fetchall() # products - ((1,", "# print(l_id) # ((20, 'Andaman'), (19, 'Assam'), (26, 'Jodhpur'), (17,", "1 into a string cursor.execute(\"SELECT loc_id FROM location WHERE loc_name", "* FROM location ORDER BY loc_id\") warehouse_data = cursor.fetchall() cursor.execute(\"SELECT", "are working properly....I'm having some doubts about the datatypes type_", "# \"Microwave\": {\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\": 0}}", "transaction_allowed: try: cursor.execute(\"INSERT INTO location(loc_name) VALUES(%s)\", (warehouse_name,)) mysql.connection.commit() except(MySQLdb.Error(not Warning),", "'Puducherry', 0), # ('Washing machine', 'Andaman', 0), ('Washing machine', 'Assam',", "database db = yaml.load(open('db.yaml')) app.config['MYSQL_HOST'] = db['mysql_host'] app.config['MYSQL_USER'] = db['mysql_user']", "(id_,)) in_place = cursor.fetchall() cursor.execute(\"SELECT prod_id, SUM(prod_quantity) FROM logistics where", "in products]: # print(p_id) # 1 # 2 # 6", "CREATE TABLE IF NOT EXISTS logistics(trans_id integer primary key auto_increment,", "# No. of pianos that leave andaman # print(sum_from_loc) if", "# (Piano,) + (Andaman,), (0,) = ('Piano', 'Andaman', 0) #print(log_summary)", "flask import render_template as render from flask_mysqldb import MySQL import", "%s \"\"\", (quantity, prod_name)) mysql.connection.commit() except (MySQLdb.Error, MySQLdb.Warning) as e:", "unallocated_quantity + %s WHERE prod_name = %s \"\"\", (quantity, prod_name))", "= (0,) #how much enters andaman - how much leaves", "= %s \"\"\", (quantity, prod_name)) mysql.connection.commit() except(MySQLdb.Error, MySQLdb.Warning) as e:", "\"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\": 0}, # \"Microwave\": {\"Andaman\": 0,", "in_place.keys(): #calculator entered mumbai if x in out_place.keys(): #calculator left", "# 2 # 6 # 7 # print(locations) # for", "', None] and prod_name not in prod_new: cursor.execute(\"UPDATE products SET", "(id_,)) mysql.connection.commit() cursor.close() return redirect(url_for('product')) @app.route('/edit', methods=['POST', 'GET']) def edit():", "print(alloc_json) # {\"Piano\": {\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\":", "prod_name = prod_name.capitalize() if prod_name not in ['', ' ',", "0), ('Washing machine', 'Jodhpur', 0), ('Washing machine', 'Puducherry', 0), #", "prod_quantity) SELECT products.prod_id, location.loc_id, %s FROM products, location WHERE products.prod_name", "0}, # \"Microwave\": {\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\":", "%s\", str(p_id,)) temp_prod_name = cursor.fetchone() #print(temp_prod_name) ('Piano',) for l_id in", "converts 1 into a string cursor.execute(\"SELECT loc_id FROM location WHERE", "UPDATE products SET prod_quantity = %s, unallocated_quantity = unallocated_quantity +", "cursor.close() return render('index.html',link=link, title = \"Summary\", warehouses = warehouse, products", "cursor.execute(\"\"\" SELECT SUM(log.prod_quantity) FROM logistics log WHERE log.prod_id = %s", "integer primary key auto_increment, loc_name varchar(20) unique not null); \"\"\")", "warehouse, products = None, None, None cursor = mysql.connection.cursor() try:", "try: cursor.execute(\"INSERT INTO products(prod_name, prod_quantity, unallocated_quantity) VALUES (%s, %s, %s)\",", "'Puducherry', 0)] alloc_json = {} for row in log_summary: try:", "request.method == 'POST': # transaction times are stored in UTC", "row in log_summary: try: if row[1] in alloc_json[row[0]].keys(): #Check if", "prod_name, unallocated_quantity 
FROM products\") products = cursor.fetchall() cursor.execute(\"SELECT loc_id, loc_name", "(l_id,)) #str(l_id,) giving an error temp_loc_name = cursor.fetchone() # print(temp_loc_name)", "will be unallocated if mumbai is deleted else: all_place[x] =", "('Microwave', 'Puducherry', 0)] alloc_json = {} for row in log_summary:", "'Assam', 0), ('Microwave', 'Jodhpur', 0), ('Microwave', 'Puducherry', 0)] alloc_json =", "products = cursor.fetchall() cursor.execute(\"SELECT prod_name FROM products\") prod_names = cursor.fetchall()", "# 17 log_summary = [] for p_id in [x[0] for", "(all_place[products_], products_)) cursor.execute(\"DELETE FROM location where loc_id = %s\", (id_,))", "'Jodhpur': 0, 'Puducherry': 0}} alloc_json = json.dumps(alloc_json, cls = Encoder)", "sent to daman diu, therefore, 1900 remains in mumbai which", "0, 'Jodhpur': 0, 'Puducherry': 0}, # 'Iphone xr': {'Andaman': 0,", "MySQL(app) link = {x:x for x in [\"location\", \"product\", \"movement\"]}", "mysql.connection.cursor() cursor.execute(\"SELECT loc_name FROM location\") loc_names = cursor.fetchall() loc_new =", "'/' def init_database(): cursor = mysql.connection.cursor() # Initialise all tables", "try: if row[1] in alloc_json[row[0]].keys(): #Check if Andaman exists in", "be unallocated\") try: cursor.execute(\"\"\" INSERT INTO logistics(prod_id, from_loc_id, prod_quantity) SELECT", "%s, %s, %s) \"\"\", (prod_id, from_loc, to_loc, quantity)) mysql.connection.commit() except(MySQLdb.Error,", "%s WHERE prod_id = %s\", (prod_name, str(prod_id))) if prod_quantity not", "']: try: cursor.execute(\"\"\" INSERT INTO logistics(prod_id, to_loc_id, prod_quantity) SELECT products.prod_id,", "--> (0,) --> No pianos leave andaman sum_from_loc = (0,)", "'Jodhpur': 0, 'Puducherry': 0}, # 'Washing machine': {'Andaman': 0, 'Assam':", "MySQL import yaml import json import MySQLdb import decimal class", "occurred: {e}\" else: msg = \"Transaction added successfully\" # if", "%s\", (l_id,)) #str(l_id,) giving an error temp_loc_name = cursor.fetchone() #", "location(loc_id)); \"\"\") mysql.connection.commit() cursor.close() @app.route('/') def summary(): init_database() msg =", "None, None cursor = mysql.connection.cursor() try: cursor.execute(\"Select * from location\")", "keys, etc. 
alloc_json[row[0]][row[1]] += row[2] #If yes, the add the", "from products\") products = cursor.fetchall() cursor.execute(\"SELECT prod_name FROM products\") prod_names", "= request.form['prod_name'] quantity = request.form['prod_quantity'] prod_name = prod_name.capitalize() transaction_allowed =", "movement(): init_database() msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT * FROM logistics\")", "cursor.fetchall() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e: msg = f\"An error", "['', ' ', None] and prod_name not in prod_new: cursor.execute(\"UPDATE", "= row[2] #If no, add it as a new quantity", "cursor.execute(\"UPDATE products SET prod_name = %s WHERE prod_id = %s\",", "locations=locations, allocated = alloc_json, logs = logistics_data, database = log_summary)", "keys - prod_id - toloc = mumbai #out_place = {3:100}", "{1:20, 3:2000} - keys - prod_id - toloc = mumbai", "not in prod_new: cursor.execute(\"SELECT prod_quantity FROM products WHERE prod_id =", "[None, '', ' ']: try: cursor.execute(\"\"\" INSERT INTO logistics(prod_id, to_loc_id,", "MySQLdb.Warning) as e: msg = f\"An error occured: {e}\" else:", "'Andaman', 0), ('Microwave', 'Assam', 0), ('Microwave', 'Jodhpur', 0), ('Microwave', 'Puducherry',", "== 'product' and request.method == 'POST': prod_id = request.form['product_id'] prod_name", "isinstance(obj, decimal.Decimal): return str(obj) # Setting up the flask instance", "init_database() msg = None q_data, warehouse, products = None, None,", "%s GROUP BY prod_id\", (id_,)) in_place = cursor.fetchall() cursor.execute(\"SELECT prod_id,", "17 log_summary = [] for p_id in [x[0] for x", "in [None, '', ' ']: try: cursor.execute(\"\"\" INSERT INTO logistics(prod_id,", "= {1:20, 3:2000} - keys - prod_id - toloc =", "'Jodhpur', 0), ('Washing machine', 'Puducherry', 0), # ('Microwave', 'Andaman', 0),", "quantity)) mysql.connection.commit() except(MySQLdb.Error, MySQLdb.Warning) as e: msg=f\"An error occurred: {e}\"", "SUM(log.prod_quantity) FROM logistics log WHERE log.prod_id = %s AND log.to_loc_id", "= cursor.fetchall() loc_new = [] for i in range(len(loc_names)): loc_new.append(loc_names[i][0])", "prod_name = %s \"\"\", (quantity, prod_name)) mysql.connection.commit() except(MySQLdb.Error, MySQLdb.Warning) as", "loc_id FROM location WHERE loc_name = %s\", (from_loc,)) from_loc =", "type_ = request.args.get('type') cursor = mysql.connection.cursor() cursor.execute(\"SELECT loc_name FROM location\")", "machine', 'Assam', 0), ('Washing machine', 'Jodhpur', 0), ('Washing machine', 'Puducherry',", "request.form['warehouse_name'] warehouse_name = warehouse_name.capitalize() transaction_allowed = False if warehouse_name not", "= request.args.get('loc_id') cursor.execute(\"SELECT prod_id, SUM(prod_quantity) FROM logistics where to_loc_id =", "from_loc = request.form['from_loc'] to_loc = request.form['to_loc'] quantity = request.form['quantity'] #", "= request.form['warehouse_name'] warehouse_name = warehouse_name.capitalize() transaction_allowed = False if warehouse_name", "return redirect(url_for('location')) elif type_ == 'product': id_ = request.args.get('prod_id') cursor.execute(\"DELETE", "how much leaves andaman = how much remains (allocated) in", "message if exists! 
if msg: print(msg) cursor.close() return redirect(url_for('movement')) return", "request.method == 'POST': warehouse_name = request.form['warehouse_name'] warehouse_name = warehouse_name.capitalize() transaction_allowed", "log.to_loc_id = %s \"\"\", (p_id, l_id)) sum_to_loc = cursor.fetchone() #", "None] and warehouse_name not in loc_new: transaction_allowed=True if transaction_allowed: try:", "'Puducherry': 0}, # 'Microwave': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0,", "log_summary += [(temp_prod_name + temp_loc_name + (sum_to_loc[0] - sum_from_loc[0],) )]", "--> No pianos leave andaman sum_from_loc = (0,) if sum_to_loc[0]", "type_ = request.args.get('type') cursor = mysql.connection.cursor() if type_ == 'location':", "location(loc_id integer primary key auto_increment, loc_name varchar(20) unique not null);", "0, \"Puducherry\": 0}} if request.method == 'POST': # transaction times", "prod_id = request.form['product_id'] prod_name = request.form['prod_name'] prod_quantity = request.form['prod_quantity'] prod_name", "', None] and warehouse_name not in loc_new: transaction_allowed=True if transaction_allowed:", "def summary(): init_database() msg = None q_data, warehouse, products =", "unallocated_quantity - %s WHERE prod_name = %s \"\"\", (quantity, prod_name))", "occured: {e}\" else: msg = \"Transaction added successfully\" elif to_loc", "if prod_quantity not in ['', ' ', None] and prod_name", "0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}, # 'Washing machine':", "WHERE loc_id = %s\", (loc_name, loc_id)) mysql.connection.commit() cursor.close() return redirect(url_for('location'))", "prod_quantity) VALUES(%s, %s, %s, %s) \"\"\", (prod_id, from_loc, to_loc, quantity))", "logistics_data = cursor.fetchall() cursor.execute(\"SELECT prod_id, prod_name, unallocated_quantity FROM products\") products", "old_prod_quantity, str(prod_id))) mysql.connection.commit() cursor.close() return redirect(url_for('product')) return render(url_for(type_)) if __name__", "prod_name, unallocated_quantity, prod_quantity FROM products \"\"\") q_data = cursor.fetchall() except(MySQLdb.Error(not", "for p_id in [x[0] for x in products]: cursor.execute(\"SELECT prod_name", "{\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\": 0}, # \"Iphone", "request, redirect from flask import render_template as render from flask_mysqldb", "location ORDER BY loc_id\") warehouse_data = cursor.fetchall() cursor.execute(\"SELECT loc_name FROM", "# print(sum_from_loc) if sum_from_loc[0] is None: #e.g. 
(None,) --> (0,)", "(prod_quantity, prod_quantity, old_prod_quantity, str(prod_id))) mysql.connection.commit() cursor.close() return redirect(url_for('product')) return render(url_for(type_))", "INTEGER NULL, prod_quantity INTEGER NOT NULL, trans_time TIMESTAMP NOT NULL", "@app.route('/') def summary(): init_database() msg = None q_data, warehouse, products", "cursor.fetchall() ]) cursor.execute(\"SELECT prod_id FROM products WHERE prod_name = %s\",", "msg: print(msg) cursor.close() return redirect(url_for('movement')) return render('movement.html', title = \"Product", "(init condition) if from_loc in [None, '', ' ']: try:", "old_prod_quantity = cursor.fetchone()[0] cursor.execute(\"\"\" UPDATE products SET prod_quantity = %s,", "the value of piano empty alloc_json[row[0]][row[1]] = row[2] #Add Andaman", "prod_new.append(prod_names[i][0]) if type_ == 'location' and request.method == 'POST': loc_id", "prod_new = [] for i in range(len(prod_names)): prod_new.append(prod_names[i][0]) if type_", "prod_new: cursor.execute(\"SELECT prod_quantity FROM products WHERE prod_id = %s\", (prod_id,))", "cursor.close() return redirect(url_for('product')) return render(url_for(type_)) if __name__ == '__main__': app.run(debug=True)", "\"\"\") q_data = cursor.fetchall() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e: msg", "piano empty alloc_json[row[0]][row[1]] = row[2] #Add Andaman with quantity as", "0, \"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\": 0}, # \"Washing machine\":", "+ temp_loc_name + (sum_to_loc[0] - sum_from_loc[0],)) # (Piano,) + (Andaman,),", "unallocated_quantity = unallocated_quantity + %s - %s WHERE prod_id =", "cursor.execute(\"Select * from location\") warehouse = cursor.fetchall() cursor.execute(\"Select * from", "enter andaman cursor.execute(\"\"\" SELECT SUM(log.prod_quantity) FROM logistics log WHERE log.prod_id", "if x in out_place.keys(): #calculator left mumbai all_place[x] = in_place[x]", "WHERE prod_id = %s \"\"\", (prod_quantity, prod_quantity, old_prod_quantity, str(prod_id))) mysql.connection.commit()", "cursor.execute(\"SELECT prod_id, prod_name, unallocated_quantity FROM products\") products = cursor.fetchall() cursor.execute(\"SELECT", "['', ' ', None] and prod_name not in prod_new: cursor.execute(\"SELECT", "'Puducherry')) # 20 # 19 # 26 # 17 log_summary", "products(prod_id), FOREIGN KEY(from_loc_id) REFERENCES location(loc_id), FOREIGN KEY(to_loc_id) REFERENCES location(loc_id)); \"\"\")", "x in products]: # print(p_id) # 1 # 2 #", "request.form['prod_quantity'] prod_name = prod_name.capitalize() transaction_allowed = False if prod_name not", "no, add it as a new quantity except (KeyError, TypeError):", "for x in products]: cursor.execute(\"SELECT prod_name FROM products WHERE prod_id", "\"Warehouse Locations\") @app.route('/product.html', methods=['POST', 'GET']) def product(): init_database() msg=None cursor", "return redirect(url_for('product')) return render('product.html', link=link, products = products, transaction_message=msg, title=\"Products", "{} #Make the value of piano empty alloc_json[row[0]][row[1]] = row[2]", "NOT NULL, prod_quantity integer not null, unallocated_quantity integer); \"\"\") #", "else: msg = \"Transaction added successfully\" #Print a transaction message", "from_loc_id INTEGER NULL, to_loc_id INTEGER NULL, prod_quantity INTEGER NOT NULL,", "\"Transaction added successfully\" # if 'from loc' and 'to_loc' given", "TABLE IF NOT EXISTS logistics(trans_id integer primary key auto_increment, prod_id", "q_data = cursor.fetchall() 
except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e: msg =", "machine', 'Jodhpur', 0), ('Washing machine', 'Puducherry', 0), # ('Microwave', 'Andaman',", "edit(): # Try capitalize() type_ = request.args.get('type') cursor = mysql.connection.cursor()", "(1,) x[0] -> 1 join converts 1 into a string", "# 'Microwave': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}}", "FROM location ORDER BY loc_id\") warehouse_data = cursor.fetchall() cursor.execute(\"SELECT loc_name", "being shipped to a warehouse (init condition) if from_loc in", "= \"Warehouse Locations\") @app.route('/product.html', methods=['POST', 'GET']) def product(): init_database() msg=None", "- prod_id - toloc = mumbai #out_place = {3:100} -", "\"\"\") cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS logistics(trans_id integer primary", "consistency cursor.execute(\"\"\" UPDATE products SET unallocated_quantity = unallocated_quantity + %s", "(sum_to_loc[0] - sum_from_loc[0],) )] ORRRRRRRRRRR log_summary.append(temp_prod_name + temp_loc_name + (sum_to_loc[0]", "{e}\" else: msg = \"Transaction added successfully\" elif to_loc in", "log.prod_id = %s AND log.from_loc_id = %s \"\"\", (p_id, l_id))", "= False if warehouse_name not in ['', ' ', None]", "= request.form['prod_name'] prod_quantity = request.form['prod_quantity'] prod_name = prod_name.capitalize() if prod_name", "INTO location(loc_name) VALUES(%s)\", (warehouse_name,)) mysql.connection.commit() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e:", "cursor.execute(\"SELECT loc_name FROM location WHERE loc_id = %s\", (l_id,)) #str(l_id,)", "add it as a new quantity except (KeyError, TypeError): alloc_json[row[0]]", "products WHERE prod_id = %s\", (prod_id,)) old_prod_quantity = cursor.fetchone()[0] cursor.execute(\"\"\"", "= \"Transaction added successfully\" #Print a transaction message if exists!", "SET loc_name = %s WHERE loc_id = %s\", (loc_name, loc_id))", "as a new value in the dictionary #print(alloc_json) # {'Piano':", "%s \"\"\", (quantity, prod_name, from_loc)) #Important to maintain consistency cursor.execute(\"\"\"", "0, 'Puducherry': 0}} alloc_json = json.dumps(alloc_json, cls = Encoder) #", "prod_name = request.form['prod_name'] prod_quantity = request.form['prod_quantity'] prod_name = prod_name.capitalize() if", "unallocated_quantity + %s WHERE prod_id = %s \"\"\", (all_place[products_], products_))", "sum_from_loc[0],) )] ORRRRRRRRRRR log_summary.append(temp_prod_name + temp_loc_name + (sum_to_loc[0] - sum_from_loc[0],))", "cursor.fetchall() loc_new = [] for i in range(len(loc_names)): loc_new.append(loc_names[i][0]) if", "logistics(prod_id, to_loc_id, prod_quantity) SELECT products.prod_id, location.loc_id, %s FROM products, location", "= %s \"\"\", (p_id, l_id)) sum_from_loc = cursor.fetchone() # No.", "to_loc = ''.join([str(x[0]) for x in cursor.fetchall() ]) cursor.execute(\"SELECT prod_id", "in alloc_json[row[0]].keys(): #Check if Andaman exists in Piano ka keys,", "' ', None]: transaction_allowed= True if transaction_allowed: try: cursor.execute(\"INSERT INTO", "products WHERE prod_id = %s\", (id_,)) mysql.connection.commit() cursor.close() return redirect(url_for('product'))", "request.method == 'POST': prod_name = request.form['prod_name'] quantity = request.form['prod_quantity'] prod_name", "return render('index.html',link=link, title = \"Summary\", warehouses = warehouse, products =", "cursor.fetchall() cursor.execute(\"Select * from products\") products = cursor.fetchall() cursor.execute(\"\"\" SELECT", "'Puducherry': 0}, # 
'Washing machine': {'Andaman': 0, 'Assam': 0, 'Jodhpur':", "'GET']) def product(): init_database() msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT *", "range(len(prod_names)): prod_new.append(prod_names[i][0]) if request.method == 'POST': prod_name = request.form['prod_name'] quantity", "# if no 'from loc' is given, that means the", "mysql.connection.cursor() cursor.execute(\"SELECT * FROM location ORDER BY loc_id\") warehouse_data =", "MySQLdb.Warning()) as e: msg = f\"An error occured: {e}\" else:", "from flask_mysqldb import MySQL import yaml import json import MySQLdb", "mumbai which will be unallocated if mumbai is deleted else:", "logistics\") logistics_data = cursor.fetchall() cursor.execute(\"SELECT prod_id, prod_name, unallocated_quantity FROM products\")", "'POST': loc_id = request.form['loc_id'] loc_name = request.form['loc_name'] loc_name = loc_name.capitalize()", "deleted else: all_place[x] = in_place[x] for products_ in all_place.keys(): cursor.execute(\"\"\"", "the queries are working properly....I'm having some doubts about the", "import yaml import json import MySQLdb import decimal class Encoder(json.JSONEncoder):", "cursor.fetchone() # No. of pianos that leave andaman # print(sum_from_loc)", "from_loc = ''.join([str(x[0]) for x in cursor.fetchall()]) # cursor.fetchall ->", "= request.form['prod_quantity'] prod_name = prod_name.capitalize() if prod_name not in ['',", "warehouse_name = request.form['warehouse_name'] warehouse_name = warehouse_name.capitalize() transaction_allowed = False if", "flask_mysqldb import MySQL import yaml import json import MySQLdb import", "prod_id\", (id_,)) in_place = cursor.fetchall() cursor.execute(\"SELECT prod_id, SUM(prod_quantity) FROM logistics", "print(msg) cursor.close() return redirect(url_for('location')) return render('location.html', link=link, warehouses=warehouse_data, transaction_message=msg, title", "- (1, 'Piano', 250) # x[0] = 1 # for", "a string cursor.execute(\"SELECT loc_id FROM location WHERE loc_name = %s\",", "url_for, request, redirect from flask import render_template as render from", "e: msg=f\"An error occurred: {e}\" else: msg = \"Transaction added", "= prod_name.capitalize() if prod_name not in ['', ' ', None]", "prod_quantity = %s, unallocated_quantity = unallocated_quantity + %s - %s", "# x[0] = 1 # for p_id in [x[0] for", "is being shipped between warehouses else: try: cursor.execute(\"SELECT loc_id FROM", "l_id)) sum_from_loc = cursor.fetchone() # No. of pianos that leave", "log_summary = [] for p_id in [x[0] for x in", "leave andaman # print(sum_from_loc) if sum_from_loc[0] is None: #e.g. 
(None,)", "cursor.fetchall() prod_new = [] for i in range(len(prod_names)): prod_new.append(prod_names[i][0]) if", "else: try: cursor.execute(\"SELECT loc_id FROM location WHERE loc_name = %s\",", "log_summary: try: if row[1] in alloc_json[row[0]].keys(): #Check if Andaman exists", "occurred: {e}\" else: msg = \"Transaction added successfully\" #Print a", "cursor.execute(\"\"\" SELECT prod_name, unallocated_quantity, prod_quantity FROM products \"\"\") q_data =", "#str(l_id,) giving an error temp_loc_name = cursor.fetchone() # print(temp_loc_name) -", "x in out_place.keys(): #calculator left mumbai all_place[x] = in_place[x] -", "prod_name FROM products\") prod_names = cursor.fetchall() prod_new = [] for", "0, \"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\": 0}, # \"Iphone xr\":", "# {\"Piano\": {\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\": 0},", "diu, therefore, 1900 remains in mumbai which will be unallocated", "%s \"\"\", (p_id, l_id)) sum_to_loc = cursor.fetchone() # No.of pianos", "+ (Andaman,), (0,) = ('Piano', 'Andaman', 0) #print(log_summary) # [('Piano',", "0), ('Iphone xr', 'Jodhpur', 0), ('Iphone xr', 'Puducherry', 0), #", "in in_place.keys(): #calculator entered mumbai if x in out_place.keys(): #calculator", "('Piano', 'Jodhpur', 0), ('Piano', 'Puducherry', 0), # ('Iphone xr', 'Andaman',", "cursor.execute(\"SELECT * FROM location ORDER BY loc_id\") warehouse_data = cursor.fetchall()", "[x[0] for x in products]: cursor.execute(\"SELECT prod_name FROM products WHERE", "pianos that leave andaman # print(sum_from_loc) if sum_from_loc[0] is None:", "app.config['MYSQL_HOST'] = db['mysql_host'] app.config['MYSQL_USER'] = db['mysql_user'] app.config['MYSQL_PASSWORD'] = db['mysql_password'] app.config['MYSQL_DB']", "transaction_allowed: try: cursor.execute(\"INSERT INTO products(prod_name, prod_quantity, unallocated_quantity) VALUES (%s, %s,", "render from flask_mysqldb import MySQL import yaml import json import", "= alloc_json, logs = logistics_data, database = log_summary) @app.route('/delete') def", "'Andaman', 0), ('Piano', 'Assam', 0), ('Piano', 'Jodhpur', 0), ('Piano', 'Puducherry',", "(2, 'Iphone xr', 600), (6, 'Washing machine', 100), (7, 'Microwave',", "INSERT INTO logistics(prod_id, from_loc_id, to_loc_id, prod_quantity) VALUES(%s, %s, %s, %s)", "which will be unallocated if mumbai is deleted else: all_place[x]", "f\"An error occured: {e}\" print(msg) cursor.close() return render('index.html',link=link, title =", "location(): init_database() msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT * FROM location", "'Assam'), (26, 'Jodhpur'), (17, 'Puducherry')) # 20 # 19 #", "create a trigger, let's see! 
cursor.execute(\"\"\" CREATE TABLE IF NOT", "prod_id = %s\", (prod_id,)) old_prod_quantity = cursor.fetchone()[0] cursor.execute(\"\"\" UPDATE products", "NULL, trans_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY(prod_id) REFERENCES", "products SET prod_name = %s WHERE prod_id = %s\", (prod_name,", "to a warehouse (init condition) if from_loc in [None, '',", "locations = cursor.fetchall() # products - ((1, 'Piano', 250), (2,", "quantity to the previous quantity else: alloc_json[row[0]][row[1]] = row[2] #If", "prod_quantity INTEGER NOT NULL, trans_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,", "type_ == 'location' and request.method == 'POST': loc_id = request.form['loc_id']", "= %s\", (id_,)) mysql.connection.commit() cursor.close() return redirect(url_for('location')) elif type_ ==", "i in range(len(prod_names)): prod_new.append(prod_names[i][0]) if type_ == 'location' and request.method", "error occured: {e}\" print(msg) cursor.close() return render('index.html',link=link, title = \"Summary\",", "loc_id = request.form['loc_id'] loc_name = request.form['loc_name'] loc_name = loc_name.capitalize() if", "cursor.fetchone() #print(temp_prod_name) ('Piano',) for l_id in [x[0] for x in", "%s\", (id_,)) mysql.connection.commit() cursor.close() return redirect(url_for('product')) @app.route('/edit', methods=['POST', 'GET']) def", "#calculator left mumbai all_place[x] = in_place[x] - out_place[x] #2000 fridges", "%s, %s)\", (prod_name, quantity, quantity)) mysql.connection.commit() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as", "location WHERE products.prod_name = %s AND location.loc_name = %s \"\"\",", "' ', None] and prod_name not in prod_new: if quantity", "products.prod_name = %s AND location.loc_name = %s \"\"\", (quantity, prod_name,", "cursor.fetchall() #Convert list of tuples to dict in_place = dict(in_place)", "'Andaman', 0) #print(log_summary) # [('Piano', 'Andaman', 0), ('Piano', 'Assam', 0),", "SELECT products.prod_id, location.loc_id, %s FROM products, location WHERE products.prod_name =", "(to_loc,)) to_loc = ''.join([str(x[0]) for x in cursor.fetchall() ]) cursor.execute(\"SELECT", "= request.form['prod_quantity'] prod_name = prod_name.capitalize() transaction_allowed = False if prod_name", "INTO products(prod_name, prod_quantity, unallocated_quantity) VALUES (%s, %s, %s)\", (prod_name, quantity,", "%s AND location.loc_name = %s \"\"\", (quantity, prod_name, from_loc)) #Important", "is given, that means the product is being shipped to", "all_place[x] = in_place[x] - out_place[x] #2000 fridges came to mumbai", "= cursor.fetchall() cursor.execute(\"SELECT prod_id, prod_name, unallocated_quantity FROM products\") products =", "to mumbai from kolkata, 100 fridges were sent to daman", "= {x:x for x in [\"location\", \"product\", \"movement\"]} link[\"index\"] =", "in loc_new: cursor.execute(\"UPDATE location SET loc_name = %s WHERE loc_id", "= cursor.fetchone()[0] cursor.execute(\"\"\" UPDATE products SET prod_quantity = %s, unallocated_quantity", "# 7 # print(locations) # for l_id in [x[0] for", "methods=['POST', 'GET']) def edit(): # Try capitalize() type_ = request.args.get('type')", "= [] for i in range(len(prod_names)): prod_new.append(prod_names[i][0]) if type_ ==", "= cursor.fetchall() cursor.execute(\"\"\" SELECT prod_name, unallocated_quantity, prod_quantity FROM products \"\"\")", "BY loc_id\") warehouse_data = cursor.fetchall() cursor.execute(\"SELECT loc_name FROM location\") loc_names", "# \"Washing machine\": {\"Andaman\": 0, \"Assam\": 0, 
\"Jodhpur\": 0, \"Puducherry\":", "'Piano', 250), (2, 'Iphone xr', 600), (6, 'Washing machine', 100),", "# products - ((1, 'Piano', 250), (2, 'Iphone xr', 600),", "cursor.execute(\"SELECT * from products\") products = cursor.fetchall() cursor.execute(\"SELECT prod_name FROM", "= %s\", (from_loc,)) from_loc = ''.join([str(x[0]) for x in cursor.fetchall()])", "xr\": {\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\": 0, \"Puducherry\": 0}, #", "= %s AND log.from_loc_id = %s \"\"\", (p_id, l_id)) sum_from_loc", "auto_increment, prod_name varchar(20) UNIQUE NOT NULL, prod_quantity integer not null,", "request.form['quantity'] # if no 'from loc' is given, that means", "products = cursor.fetchall() cursor.execute(\"SELECT loc_id, loc_name FROM location\") locations =", "= %s \"\"\", (all_place[products_], products_)) cursor.execute(\"DELETE FROM location where loc_id", "if transaction_allowed: try: cursor.execute(\"INSERT INTO location(loc_name) VALUES(%s)\", (warehouse_name,)) mysql.connection.commit() except(MySQLdb.Error(not", "if no 'from loc' is given, that means the product", "prod_id = %s \"\"\", (all_place[products_], products_)) cursor.execute(\"DELETE FROM location where", "]) cursor.execute(\"SELECT prod_id FROM products WHERE prod_name = %s\", (prod_name,))", "kolkata, 100 fridges were sent to daman diu, therefore, 1900", "= cursor.fetchall() cursor.execute(\"SELECT loc_id, loc_name FROM location\") locations = cursor.fetchall()", "' ', None] and loc_name not in loc_new: cursor.execute(\"UPDATE location", "in range(len(loc_names)): loc_new.append(loc_names[i][0]) cursor.execute(\"SELECT prod_name FROM products\") prod_names = cursor.fetchall()", "warehouse_name not in loc_new: transaction_allowed=True if transaction_allowed: try: cursor.execute(\"INSERT INTO", "location WHERE loc_name = %s\", (from_loc,)) from_loc = ''.join([str(x[0]) for", "in_place[x] for products_ in all_place.keys(): cursor.execute(\"\"\" UPDATE products SET unallocated_quantity", "products(prod_id integer primary key auto_increment, prod_name varchar(20) UNIQUE NOT NULL,", "dictionary #print(alloc_json) # {'Piano': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0,", "the database db = yaml.load(open('db.yaml')) app.config['MYSQL_HOST'] = db['mysql_host'] app.config['MYSQL_USER'] =", "Try capitalize() type_ = request.args.get('type') cursor = mysql.connection.cursor() cursor.execute(\"SELECT loc_name", "transaction_allowed = False if prod_name not in ['', ' ',", "MySQLdb.Warning()) as e: msg = f\"An error occured: {e}\" print(msg)", "'Iphone xr', 600), (6, 'Washing machine', 100), (7, 'Microwave', 50))", "+= row[2] #If yes, the add the quantity to the", "'Washing machine', 100), (7, 'Microwave', 50)) # x in product", "cursor.execute(\"SELECT prod_name FROM products WHERE prod_id = %s\", str(p_id,)) temp_prod_name", "#print(alloc_json) # {'Piano': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry':", "prod_name)) mysql.connection.commit() except (MySQLdb.Error, MySQLdb.Warning) as e: msg = f\"An", "to daman diu, therefore, 1900 remains in mumbai which will", "request.method == 'POST': loc_id = request.form['loc_id'] loc_name = request.form['loc_name'] loc_name", "TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY(prod_id) REFERENCES products(prod_id), FOREIGN", "prod_name not in prod_new: cursor.execute(\"UPDATE products SET prod_name = %s", "= warehouse, products = products, database = q_data) @app.route('/location.html', methods=['POST',", "= mysql.connection.cursor() cursor.execute(\"SELECT * FROM location ORDER BY 
loc_id\") warehouse_data", "[] for i in range(len(prod_names)): prod_new.append(prod_names[i][0]) if request.method == 'POST':", "products, location WHERE products.prod_name = %s AND location.loc_name = %s", "'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}, # 'Microwave': {'Andaman': 0,", "much remains (allocated) in andaman # log_summary += [(temp_prod_name +", "product - (1, 'Piano', 250) # x[0] = 1 #", "that enter andaman cursor.execute(\"\"\" SELECT SUM(log.prod_quantity) FROM logistics log WHERE", "in product - (1, 'Piano', 250) # x[0] = 1", "of tuples to dict in_place = dict(in_place) out_place = dict(out_place)", "= Flask(__name__) # Configure the database db = yaml.load(open('db.yaml')) app.config['MYSQL_HOST']", "in [None, '', ' ']: print(\"To Location wasn't specified, will", "# 20 # 19 # 26 # 17 log_summary =", "occured: {e}\" else: msg = f\"{warehouse_name} added succcessfully\" if msg:", "= %s GROUP BY prod_id\", (id_,)) out_place = cursor.fetchall() #Convert", "prod_id = %s\", str(p_id,)) temp_prod_name = cursor.fetchone() #print(temp_prod_name) ('Piano',) for", "working properly....I'm having some doubts about the datatypes type_ =", "0, 'Puducherry': 0}, # 'Microwave': {'Andaman': 0, 'Assam': 0, 'Jodhpur':", "try: cursor.execute(\"Select * from location\") warehouse = cursor.fetchall() cursor.execute(\"Select *", "etc. alloc_json[row[0]][row[1]] += row[2] #If yes, the add the quantity", "print(temp_loc_name) - (Andaman,) #e.g. prod_id = 1 = piano, loc_id", "a new value in the dictionary #print(alloc_json) # {'Piano': {'Andaman':", "for x in cursor.fetchall() ]) cursor.execute(\"SELECT prod_id FROM products WHERE", "'Assam', 0), ('Iphone xr', 'Jodhpur', 0), ('Iphone xr', 'Puducherry', 0),", "= \"Summary\", warehouses = warehouse, products = products, database =", "dict(in_place) out_place = dict(out_place) all_place = {} #Inplace = {1:20,", "(id_,)) mysql.connection.commit() cursor.close() return redirect(url_for('location')) elif type_ == 'product': id_", "cursor.execute(\"SELECT loc_id FROM location WHERE loc_name = %s\", (to_loc,)) to_loc", "between warehouses else: try: cursor.execute(\"SELECT loc_id FROM location WHERE loc_name", "{3:100} - keys - prod_id - fromloc = mumbai for", "out_place[x] #2000 fridges came to mumbai from kolkata, 100 fridges", "a transaction message if exists! 
if msg: print(msg) cursor.close() return", "= mysql.connection.cursor() # Initialise all tables cursor.execute(\"\"\" CREATE TABLE IF", "init_database() msg=None cursor = mysql.connection.cursor() cursor.execute(\"SELECT * from products\") products", "']: print(\"To Location wasn't specified, will be unallocated\") try: cursor.execute(\"\"\"", "'POST': warehouse_name = request.form['warehouse_name'] warehouse_name = warehouse_name.capitalize() transaction_allowed = False", "None: #No pianos enter andaman sum_to_loc = (0,) #how much", "NOT EXISTS products(prod_id integer primary key auto_increment, prod_name varchar(20) UNIQUE", "loc_id\") warehouse_data = cursor.fetchall() cursor.execute(\"SELECT loc_name FROM location\") loc_names =", "= ''.join([str(x[0]) for x in cursor.fetchall() ]) cursor.execute(\"\"\" INSERT INTO", "('Iphone xr', 'Jodhpur', 0), ('Iphone xr', 'Puducherry', 0), # ('Washing", "mysql.connection.commit() except (MySQLdb.Error, MySQLdb.Warning) as e: msg = f\"An error", "key auto_increment, loc_name varchar(20) unique not null); \"\"\") cursor.execute(\"\"\" CREATE", "xr', 'Puducherry', 0), # ('Washing machine', 'Andaman', 0), ('Washing machine',", "added succcessfully\" if msg: print(msg) cursor.close() return redirect(url_for('location')) return render('location.html',", "sum_to_loc[0] is None: #No pianos enter andaman sum_to_loc = (0,)", "= cursor.fetchone() # No.of pianos that enter andaman cursor.execute(\"\"\" SELECT", "- %s WHERE prod_id = %s \"\"\", (prod_quantity, prod_quantity, old_prod_quantity,", "((1,)), x -> (1,) x[0] -> 1 join converts 1", "[x[0] for x in locations]: cursor.execute(\"SELECT loc_name FROM location WHERE", "tuples to dict in_place = dict(in_place) out_place = dict(out_place) all_place", "to_loc_id, prod_quantity) SELECT products.prod_id, location.loc_id, %s FROM products, location WHERE", "msg = \"Transaction added successfully\" # if 'from loc' and", "maintain consistency cursor.execute(\"\"\" UPDATE products SET unallocated_quantity = unallocated_quantity +", "= cursor.fetchall() cursor.execute(\"Select * from products\") products = cursor.fetchall() cursor.execute(\"\"\"", "('Washing machine', 'Andaman', 0), ('Washing machine', 'Assam', 0), ('Washing machine',", "request.form['from_loc'] to_loc = request.form['to_loc'] quantity = request.form['quantity'] # if no", "p_id in [x[0] for x in products]: cursor.execute(\"SELECT prod_name FROM", "new quantity except (KeyError, TypeError): alloc_json[row[0]] = {} #Make the", "'Jodhpur': 0, 'Puducherry': 0}, # 'Iphone xr': {'Andaman': 0, 'Assam':", "transaction times are stored in UTC prod_name = request.form['prod_name'] from_loc", "{'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}, # 'Iphone", "alloc_json, logs = logistics_data, database = log_summary) @app.route('/delete') def delete():", "if type_ == 'location': id_ = request.args.get('loc_id') cursor.execute(\"SELECT prod_id, SUM(prod_quantity)", "0), ('Piano', 'Jodhpur', 0), ('Piano', 'Puducherry', 0), # ('Iphone xr',", "have to create a trigger, let's see! 
cursor.execute(\"\"\" CREATE TABLE", "= products, transaction_message=msg, title=\"Products Log\") @app.route('/movement.html', methods=['POST', 'GET']) def movement():", "json.dumps(alloc_json, cls = Encoder) # print(alloc_json) # {\"Piano\": {\"Andaman\": 0,", "%s AND log.to_loc_id = %s \"\"\", (p_id, l_id)) sum_to_loc =", "from_loc_id = %s GROUP BY prod_id\", (id_,)) out_place = cursor.fetchall()", "and prod_name not in prod_new: cursor.execute(\"SELECT prod_quantity FROM products WHERE", "\"\"\", (prod_quantity, prod_quantity, old_prod_quantity, str(prod_id))) mysql.connection.commit() cursor.close() return redirect(url_for('product')) return", "- out_place[x] #2000 fridges came to mumbai from kolkata, 100", "0), # ('Iphone xr', 'Andaman', 0), ('Iphone xr', 'Assam', 0),", "UPDATE products SET unallocated_quantity = unallocated_quantity - %s WHERE prod_name", "FROM location WHERE loc_name = %s\", (from_loc,)) from_loc = ''.join([str(x[0])", "(quantity, prod_name)) mysql.connection.commit() except(MySQLdb.Error, MySQLdb.Warning) as e: msg=f\"An error occurred:", "temp_loc_name + (sum_to_loc[0] - sum_from_loc[0],)) # (Piano,) + (Andaman,), (0,)", "andaman sum_from_loc = (0,) if sum_to_loc[0] is None: #No pianos", "\"\"\", (quantity, prod_name, from_loc)) #Important to maintain consistency cursor.execute(\"\"\" UPDATE", "UPDATE products SET unallocated_quantity = unallocated_quantity + %s WHERE prod_name", "empty alloc_json[row[0]][row[1]] = row[2] #Add Andaman with quantity as a", "cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS products(prod_id integer primary key", "for l_id in [x[0] for x in locations]: cursor.execute(\"SELECT loc_name", "0}, # \"Washing machine\": {\"Andaman\": 0, \"Assam\": 0, \"Jodhpur\": 0,", "products, database = q_data) @app.route('/location.html', methods=['POST', 'GET']) def location(): init_database()", "products\") products = cursor.fetchall() cursor.execute(\"\"\" SELECT prod_name, unallocated_quantity, prod_quantity FROM", "0), ('Microwave', 'Assam', 0), ('Microwave', 'Jodhpur', 0), ('Microwave', 'Puducherry', 0)]", "= [] for i in range(len(loc_names)): loc_new.append(loc_names[i][0]) if request.method ==", "#Inplace = {1:20, 3:2000} - keys - prod_id - toloc", "mysql.connection.commit() cursor.close() return redirect(url_for('product')) return render(url_for(type_)) if __name__ == '__main__':", "= request.args.get('type') cursor = mysql.connection.cursor() cursor.execute(\"SELECT loc_name FROM location\") loc_names", "mysql.connection.commit() cursor.close() return redirect(url_for('product')) @app.route('/edit', methods=['POST', 'GET']) def edit(): #", "obj): if isinstance(obj, decimal.Decimal): return str(obj) # Setting up the", "mysql.connection.cursor() try: cursor.execute(\"Select * from location\") warehouse = cursor.fetchall() cursor.execute(\"Select", "'product' and request.method == 'POST': prod_id = request.form['product_id'] prod_name =", "capitalize() type_ = request.args.get('type') cursor = mysql.connection.cursor() cursor.execute(\"SELECT loc_name FROM", "#e.g. (None,) --> (0,) --> No pianos leave andaman sum_from_loc", "WHERE prod_name = %s \"\"\", (quantity, prod_name)) mysql.connection.commit() except(MySQLdb.Error, MySQLdb.Warning)" ]
[ "{ \"data\": np.random.random([3, 3, 56, 56]).astype(\"float32\"), } self.enable_trt = True", "30, 32, 1, AnalysisConfig.Precision.Half, True, False) self.fetch_list = [out] def", "np from inference_pass_test import InferencePassTest import paddle.fluid as fluid import", "2.0 (the \"License\"); # you may not use this file", "2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under", "AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanAllTest.DynamicShapeParam({ 'data':", "self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllNoBatchTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program,", "TRTReduceMeanAllNoBatchTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\",", "core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) if __name__", "[out] self.dynamic_shape_params = TRTReduceMeanAllNoBatchTest.DynamicShapeParam( { 'data': [1, 3, 16, 16]", "= [out] self.dynamic_shape_params = TRTReduceMeanTestFP16.DynamicShapeParam({ 'data': [1, 3, 16, 16]", "fluid.data( name=\"data\", shape=[4, 3, 56, 56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data,", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "[1, 3, 16, 16] }, {'data': [3, 3, 56, 56]},", "= TRTReduceMeanAllTest.DynamicShapeParam({ 'data': [1, 3, 56, 56] }, {'data': [3,", "use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) if __name__ ==", "self.trt_parameters = TRTReduceMeanFP16Static.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half, True,", "self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestFP16(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program,", "self.dynamic_shape_params = TRTReduceMeanAllTest.DynamicShapeParam({ 'data': [1, 3, 56, 56] }, {'data':", "class TRTReduceMeanAllNoBatchTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(", "<< 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out]", "language governing permissions and # limitations under the License. from", "as core from paddle.fluid.core import PassVersionChecker from paddle.fluid.core import AnalysisConfig", "= [out] self.dynamic_shape_params = TRTReduceMeanTest.DynamicShapeParam({ 'data': [1, 3, 16, 16]", "use this file except in compliance with the License. #", "core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticFP16(InferencePassTest):", "= fluid.data( name=\"data\", shape=[3, 3, 56, 56], dtype=\"float32\") reduce_mean =", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "# limitations under the License. from __future__ import print_function import", "<gh_stars>1-10 # Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.", "paddle.fluid.core import PassVersionChecker from paddle.fluid.core import AnalysisConfig class TRTReduceMeanTest(InferencePassTest): def", "= fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { \"data\": np.random.random([3, 3, 56,", "class TRTReduceMeanAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(", "License. # You may obtain a copy of the License", "TRTReduceMeanStaticFP16(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\",", "fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { \"data\": np.random.random([4, 3, 56, 56]).astype(\"float32\"),", "{'data': [3, 3, 56, 56]}, {'data': [3, 3, 56, 56]},", "[3, 3, 56, 56]}, False) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu", "TRTReduceMeanAllTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list", "under the License is distributed on an \"AS IS\" BASIS,", "License for the specific language governing permissions and # limitations", "} self.enable_trt = True self.trt_parameters = TRTReduceMeanAllTest.TensorRTParam( 1 << 30,", "[out] def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True)", "self.startup_program): data = fluid.data( name=\"data\", shape=[4, 3, 56, 56], dtype=\"float32\")", "self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestStatic(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data", "= True self.trt_parameters = TRTReduceMeanAllNoBatchTest.TensorRTParam( 1 << 30, 32, 1,", "Reserved. 
# # Licensed under the Apache License, Version 2.0", "import print_function import unittest import numpy as np from inference_pass_test", "with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\", shape=[-1, 3, -1,", "name=\"data\", shape=[3, 3, 56, 56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean( data,", "import PassVersionChecker from paddle.fluid.core import AnalysisConfig class TRTReduceMeanTest(InferencePassTest): def setUp(self):", "self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestStatic(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program,", "core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllNoBatchTest(InferencePassTest):", "56, 56]}, {'data': [3, 3, 56, 56]}, False) def test_check_output(self):", "name=\"data\", shape=[-1, 3, 56, 56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True)", "class TRTReduceMeanStaticFP16(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(", "= True self.trt_parameters = TRTReduceMeanTestStatic.TensorRTParam( 1 << 30, 32, 1,", "56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanFP16Static.TensorRTParam( 1 <<", "is_test=True) self.feeds = { \"data\": np.random.random([4, 3, 56, 56]).astype(\"float32\"), }", "56, 56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean( data, dim=[2, -1], keep_dim=True)", "in compliance with the License. # You may obtain a", "self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticFP16(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data", "software # distributed under the License is distributed on an", "<< 30, 32, 1, AnalysisConfig.Precision.Half, False, False) self.fetch_list = [out]", "[out] self.dynamic_shape_params = TRTReduceMeanAllTest.DynamicShapeParam({ 'data': [1, 3, 56, 56] },", "\"data\": np.random.random([3, 3, 56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters", "class TRTReduceMeanStaticAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(", "[out] self.dynamic_shape_params = TRTReduceMeanTestFP16.DynamicShapeParam({ 'data': [1, 3, 16, 16] },", "56, 56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean,", "True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestStatic(InferencePassTest): def setUp(self): with", "self.startup_program): data = fluid.data( name=\"data\", shape=[-1, 3, 56, 56], dtype=\"float32\")", "56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean( data, dim=[2, -1], keep_dim=True) out", "= TRTReduceMeanTestFP16.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False)", "True self.trt_parameters = TRTReduceMeanTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32,", "3, 56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanAllNoBatchTest.TensorRTParam(", "fluid import 
paddle.fluid.core as core from paddle.fluid.core import PassVersionChecker from", "'data': [1, 3, 56, 56] }, {'data': [3, 3, 56,", "and # limitations under the License. from __future__ import print_function", "fluid.data( name=\"data\", shape=[-1, 3, -1, -1], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data,", "32, 1, AnalysisConfig.Precision.Half, True, False) self.fetch_list = [out] def test_check_output(self):", "PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticFP16(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data =", "from paddle.fluid.core import AnalysisConfig class TRTReduceMeanTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program,", "True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticFP16(InferencePassTest): def setUp(self): with", "self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestFP16(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "{ \"data\": np.random.random([4, 3, 56, 56]).astype(\"float32\"), } self.enable_trt = True", "<< 30, 32, 1, AnalysisConfig.Precision.Half, True, False) self.fetch_list = [out]", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "name=\"data\", shape=[4, 3, 56, 56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True)", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "to in writing, software # distributed under the License is", "test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))", "1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False) self.fetch_list =", "# See the License for the specific language governing permissions", "56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanTestStatic.TensorRTParam( 1 <<", "shape=[-1, 3, -1, -1], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out", "or agreed to in writing, software # distributed under the", "1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list =", "required by applicable law or agreed to in writing, software", "self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticFP16(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllNoBatchTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program):", "True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestFP16(InferencePassTest): def 
setUp(self): with", "with the License. # You may obtain a copy of", "def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\", shape=[3,", "= { \"data\": np.random.random([3, 3, 56, 56]).astype(\"float32\"), } self.enable_trt =", "self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data", "TRTReduceMeanStaticAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\",", "= TRTReduceMeanTestFP16.DynamicShapeParam({ 'data': [1, 3, 16, 16] }, {'data': [3,", "1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanTest.DynamicShapeParam({", "AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanTest.DynamicShapeParam({ 'data':", "\"data\": np.random.random([4, 3, 56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters", "compliance with the License. # You may obtain a copy", "np.random.random([4, 3, 56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters =", "All Rights Reserved. # # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanAllNoBatchTest.TensorRTParam( 1 <<", "32, 1, AnalysisConfig.Precision.Half, False, False) self.fetch_list = [out] def test_check_output(self):", "(c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed", "distributed under the License is distributed on an \"AS IS\"", "-1], keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { \"data\":", "} self.enable_trt = True self.trt_parameters = TRTReduceMeanTestFP16.TensorRTParam( 1 << 30,", "32, 1, AnalysisConfig.Precision.Half, False, False) self.fetch_list = [out] self.dynamic_shape_params =", "dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean( data, dim=[2, -1], keep_dim=True) out =", "1, AnalysisConfig.Precision.Half, True, False) self.fetch_list = [out] def test_check_output(self): if", "self.enable_trt = True self.trt_parameters = TRTReduceMeanFP16Static.TensorRTParam( 1 << 30, 32,", "data, dim=[2, -1], keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds =", "express or implied. # See the License for the specific", "class TRTReduceMeanFP16Static(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(", "except in compliance with the License. 
# You may obtain", "self.trt_parameters = TRTReduceMeanTestFP16.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False,", "3, -1, -1], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out =", "def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\", shape=[-1,", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\", shape=[4, 3, 56, 56],", "3, 56, 56]}, False) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu =", "not use this file except in compliance with the License.", "self.trt_parameters = TRTReduceMeanAllTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False,", "56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanTest.TensorRTParam( 1 <<", "= True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllTest(InferencePassTest): def setUp(self):", "writing, software # distributed under the License is distributed on", "fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\", shape=[3, 3, 56, 56],", "you may not use this file except in compliance with", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "from inference_pass_test import InferencePassTest import paddle.fluid as fluid import paddle.fluid.core", "core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestFP16(InferencePassTest):", "} self.enable_trt = True self.trt_parameters = TRTReduceMeanStaticAllTest.TensorRTParam( 1 << 30,", "out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { \"data\": np.random.random([4, 3,", "False) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True)", "32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] def test_check_output(self):", "use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllNoBatchTest(InferencePassTest): def", "data = fluid.data( name=\"data\", shape=[-1, 3, 56, 56], dtype=\"float32\") reduce_mean", "TRTReduceMeanTestStatic.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "print_function import unittest import numpy as np from inference_pass_test import", "self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program,", "56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanStaticFP16.TensorRTParam( 1 <<", "True self.trt_parameters = TRTReduceMeanAllTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32,", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "self.startup_program): data = fluid.data( name=\"data\", shape=[3, 3, 56, 56], dtype=\"float32\")", "self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllNoBatchTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data", "-1, -1], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean( data, dim=[2, -1], keep_dim=True)", "= fluid.layers.reduce_mean( data, dim=[2, -1], keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True)", "False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanAllTest.DynamicShapeParam({ 'data': [1,", "{ 'data': [1, 3, 16, 16] }, {'data': [3, 3,", "fluid.data( name=\"data\", shape=[3, 3, 56, 56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(", "use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestStatic(InferencePassTest): def", "core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllTest(InferencePassTest):", "= TRTReduceMeanTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False)", "3, 56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanAllTest.TensorRTParam(", "self.dynamic_shape_params = TRTReduceMeanTestFP16.DynamicShapeParam({ 'data': [1, 3, 16, 16] }, {'data':", "[3, 3, 56, 56]}, {'data': [3, 3, 56, 56]}, False)", "TRTReduceMeanStaticFP16.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False) self.fetch_list", "= True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticAllTest(InferencePassTest): def setUp(self):", "core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticAllTest(InferencePassTest):", "= [out] def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu,", "PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data =", "reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds =", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanAllNoBatchTest.DynamicShapeParam( { 'data': [1,", "the License is distributed on an \"AS IS\" BASIS, #", "30, 32, 1, AnalysisConfig.Precision.Half, False, False) self.fetch_list = [out] def", "1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] def test_check_output(self): if", "self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanFP16Static(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data", "with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\", shape=[3, 3, 56,", "3, 56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanTest.TensorRTParam(", "fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\", shape=[-1, 3, 56, 56],", "56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanAllTest.TensorRTParam( 1", "= fluid.data( name=\"data\", shape=[-1, 3, 56, 56], dtype=\"float32\") reduce_mean =", "= TRTReduceMeanStaticAllTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False)", "3, 56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanTestFP16.TensorRTParam(", "3, 56, 56] }, {'data': [3, 3, 56, 56]}, {'data':", "30, 32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] self.dynamic_shape_params", "PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllNoBatchTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data =", "paddle.fluid.core as core from paddle.fluid.core import PassVersionChecker from paddle.fluid.core import", "= fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = {", "AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanAllNoBatchTest.DynamicShapeParam( {", "law or agreed to in writing, software # distributed under", "16] }, {'data': [3, 3, 56, 56]}, {'data': [3, 3,", "numpy as np from inference_pass_test import InferencePassTest import paddle.fluid as", "class TRTReduceMeanTestStatic(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(", "PassVersionChecker from paddle.fluid.core import AnalysisConfig class TRTReduceMeanTest(InferencePassTest): def setUp(self): with", "= fluid.data( name=\"data\", shape=[-1, 3, -1, -1], dtype=\"float32\") reduce_mean =", "56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanTest.TensorRTParam( 1", "shape=[4, 3, 56, 56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out", "as fluid import paddle.fluid.core as core from paddle.fluid.core import PassVersionChecker", "may obtain a copy of the License at # #", "AnalysisConfig class TRTReduceMeanTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data =", "__future__ import print_function import unittest import numpy as np from", "TRTReduceMeanTest.DynamicShapeParam({ 'data': [1, 3, 16, 16] }, {'data': [3, 3,", "= True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class 
TRTReduceMeanTestStatic(InferencePassTest): def setUp(self):", "import InferencePassTest import paddle.fluid as fluid import paddle.fluid.core as core", "True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticAllTest(InferencePassTest): def setUp(self): with", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "may not use this file except in compliance with the", "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. #", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "False) self.fetch_list = [out] def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu =", "this file except in compliance with the License. # You", "self.enable_trt = True self.trt_parameters = TRTReduceMeanTestStatic.TensorRTParam( 1 << 30, 32,", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "TRTReduceMeanTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\",", "keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { \"data\": np.random.random([4,", "# # Licensed under the Apache License, Version 2.0 (the", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "= TRTReduceMeanTestStatic.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False)", "def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue(", "TRTReduceMeanTestFP16(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\",", "use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticFP16(InferencePassTest): def", "{'data': [3, 3, 56, 56]}, False) def test_check_output(self): if core.is_compiled_with_cuda():", "core from paddle.fluid.core import PassVersionChecker from paddle.fluid.core import AnalysisConfig class", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "} self.enable_trt = True self.trt_parameters = TRTReduceMeanTest.TensorRTParam( 1 << 30,", "self.feeds = { \"data\": np.random.random([4, 3, 56, 56]).astype(\"float32\"), } self.enable_trt", "56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanStaticAllTest.TensorRTParam( 1 <<", "3, -1, -1], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean( data, dim=[2, -1],", "with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\", shape=[4, 3, 56,", "keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { \"data\": np.random.random([3,", "self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data", "out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { \"data\": np.random.random([3, 3,", "Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
# #", "False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanTestFP16.DynamicShapeParam({ 'data': [1,", "self.trt_parameters = TRTReduceMeanTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False,", "self.enable_trt = True self.trt_parameters = TRTReduceMeanAllNoBatchTest.TensorRTParam( 1 << 30, 32,", "np.random.random([3, 3, 56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters =", "True self.trt_parameters = TRTReduceMeanTestFP16.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half,", "TRTReduceMeanFP16Static.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half, True, False) self.fetch_list", "TRTReduceMeanTestFP16.DynamicShapeParam({ 'data': [1, 3, 16, 16] }, {'data': [3, 3,", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "} self.enable_trt = True self.trt_parameters = TRTReduceMeanStaticFP16.TensorRTParam( 1 << 30,", "fluid.data( name=\"data\", shape=[-1, 3, 56, 56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data,", "= TRTReduceMeanTest.DynamicShapeParam({ 'data': [1, 3, 16, 16] }, {'data': [3,", "TRTReduceMeanFP16Static(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\",", "with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\", shape=[-1, 3, 56,", "or implied. # See the License for the specific language", "governing permissions and # limitations under the License. from __future__", "Rights Reserved. # # Licensed under the Apache License, Version", "AnalysisConfig.Precision.Half, False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanTestFP16.DynamicShapeParam({ 'data':", "= TRTReduceMeanStaticFP16.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False)", "False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanTest.DynamicShapeParam({ 'data': [1,", "56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanFP16Static.TensorRTParam( 1", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "import numpy as np from inference_pass_test import InferencePassTest import paddle.fluid", "self.trt_parameters = TRTReduceMeanTestStatic.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False,", "-1], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True)", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "fluid.layers.reduce_mean( data, dim=[2, -1], keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds", "TRTReduceMeanTestFP16.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False) self.fetch_list", "[out] self.dynamic_shape_params = TRTReduceMeanTest.DynamicShapeParam({ 'data': [1, 3, 16, 16] },", "True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllNoBatchTest(InferencePassTest): def setUp(self): with", "True self.trt_parameters = TRTReduceMeanAllNoBatchTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32,", "56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanAllNoBatchTest.TensorRTParam( 1", "3, 56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanTestStatic.TensorRTParam(", "= TRTReduceMeanAllTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False)", "self.enable_trt = True self.trt_parameters = TRTReduceMeanTestFP16.TensorRTParam( 1 << 30, 32,", "1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanAllTest.DynamicShapeParam({", "56, 56]}, False) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True", "32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] self.dynamic_shape_params =", "(the \"License\"); # you may not use this file except", "# you may not use this file except in compliance", "self.startup_program): data = fluid.data( name=\"data\", shape=[-1, 3, -1, -1], dtype=\"float32\")", "self.enable_trt = True self.trt_parameters = TRTReduceMeanStaticAllTest.TensorRTParam( 1 << 30, 32,", "56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanTestFP16.TensorRTParam( 1 <<", "self.feeds = { \"data\": np.random.random([3, 3, 56, 56]).astype(\"float32\"), } self.enable_trt", "paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.core import", "setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\", shape=[-1, 3,", "permissions and # limitations under the License. 
from __future__ import", "True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) if __name__ == \"__main__\": unittest.main()", "= True self.trt_parameters = TRTReduceMeanStaticAllTest.TensorRTParam( 1 << 30, 32, 1,", "fluid.data( name=\"data\", shape=[-1, 3, -1, -1], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(", "import AnalysisConfig class TRTReduceMeanTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data", "self.enable_trt = True self.trt_parameters = TRTReduceMeanStaticFP16.TensorRTParam( 1 << 30, 32,", "= TRTReduceMeanFP16Static.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half, True, False)", "data = fluid.data( name=\"data\", shape=[-1, 3, -1, -1], dtype=\"float32\") reduce_mean", "'data': [1, 3, 16, 16] }, {'data': [3, 3, 56,", "# # Unless required by applicable law or agreed to", "reduce_mean = fluid.layers.reduce_mean( data, dim=[2, -1], keep_dim=True) out = fluid.layers.batch_norm(reduce_mean,", "limitations under the License. from __future__ import print_function import unittest", "True self.trt_parameters = TRTReduceMeanStaticFP16.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half,", "self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanFP16Static(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program,", "data = fluid.data( name=\"data\", shape=[4, 3, 56, 56], dtype=\"float32\") reduce_mean", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanAllTest.DynamicShapeParam({ 'data': [1, 3, 56,", "Version 2.0 (the \"License\"); # you may not use this", "TRTReduceMeanStaticAllTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list", "self.trt_parameters = TRTReduceMeanAllNoBatchTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False,", "56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True)", "} self.enable_trt = True self.trt_parameters = TRTReduceMeanFP16Static.TensorRTParam( 1 << 30,", "= TRTReduceMeanAllNoBatchTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False)", "= True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanFP16Static(InferencePassTest): def setUp(self):", "implied. 
# See the License for the specific language governing", "fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { \"data\": np.random.random([3, 3, 56, 56]).astype(\"float32\"),", "1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanAllNoBatchTest.DynamicShapeParam(", "use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllTest(InferencePassTest): def", "setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\", shape=[4, 3,", "under the Apache License, Version 2.0 (the \"License\"); # you", "3, 56, 56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean( data, dim=[2, -1],", "= [out] self.dynamic_shape_params = TRTReduceMeanAllNoBatchTest.DynamicShapeParam( { 'data': [1, 3, 16,", "TRTReduceMeanTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list", "TRTReduceMeanAllNoBatchTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list", "= True self.trt_parameters = TRTReduceMeanTestFP16.TensorRTParam( 1 << 30, 32, 1,", "by applicable law or agreed to in writing, software #", "data = fluid.data( name=\"data\", shape=[3, 3, 56, 56], dtype=\"float32\") reduce_mean", "class TRTReduceMeanTestFP16(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(", "self.enable_trt = True self.trt_parameters = TRTReduceMeanAllTest.TensorRTParam( 1 << 30, 32,", "self.enable_trt = True self.trt_parameters = TRTReduceMeanTest.TensorRTParam( 1 << 30, 32,", "import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.core", "3, 56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanStaticAllTest.TensorRTParam(", "56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanStaticFP16.TensorRTParam( 1", "} self.enable_trt = True self.trt_parameters = TRTReduceMeanAllNoBatchTest.TensorRTParam( 1 << 30,", "3, 56, 56]}, {'data': [3, 3, 56, 56]}, False) def", "True self.trt_parameters = TRTReduceMeanTestStatic.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32,", "use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanFP16Static(InferencePassTest): def", "False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanAllTest.DynamicShapeParam({ 'data': [1, 3,", "30, 32, 1, AnalysisConfig.Precision.Half, False, False) self.fetch_list = [out] self.dynamic_shape_params", "shape=[-1, 3, 56, 56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out", "True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllTest(InferencePassTest): def setUp(self): with", "56]}, {'data': [3, 3, 56, 56]}, False) def test_check_output(self): if", "56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanStaticAllTest.TensorRTParam( 1", "self.trt_parameters = TRTReduceMeanStaticFP16.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False,", "if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( 
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class", "core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestStatic(InferencePassTest):", "flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticFP16(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program):", "= fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { \"data\": np.random.random([4, 3, 56,", "self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanAllNoBatchTest.DynamicShapeParam( { 'data': [1, 3,", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "Unless required by applicable law or agreed to in writing,", "import unittest import numpy as np from inference_pass_test import InferencePassTest", "self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanTest.DynamicShapeParam({ 'data': [1, 3, 16,", "from paddle.fluid.core import PassVersionChecker from paddle.fluid.core import AnalysisConfig class TRTReduceMeanTest(InferencePassTest):", "56] }, {'data': [3, 3, 56, 56]}, {'data': [3, 3,", "self.trt_parameters = TRTReduceMeanStaticAllTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False,", "= TRTReduceMeanAllNoBatchTest.DynamicShapeParam( { 'data': [1, 3, 16, 16] }, {'data':", "TRTReduceMeanTestStatic(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\",", "} self.enable_trt = True self.trt_parameters = TRTReduceMeanTestStatic.TensorRTParam( 1 << 30,", "the specific language governing permissions and # limitations under the", "use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticAllTest(InferencePassTest): def", "applicable law or agreed to in writing, software # distributed", "1 << 30, 32, 1, AnalysisConfig.Precision.Half, True, False) self.fetch_list =", "AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] def test_check_output(self): if core.is_compiled_with_cuda():", "3, 56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanFP16Static.TensorRTParam(", "PaddlePaddle Authors. All Rights Reserved. # # Licensed under the", "self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program,", "shape=[-1, 3, -1, -1], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean( data, dim=[2,", "3, 56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanStaticFP16.TensorRTParam(", "56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanTestStatic.TensorRTParam( 1", "= True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) if __name__ == \"__main__\":", "in writing, software # distributed under the License is distributed", "the License. 
from __future__ import print_function import unittest import numpy", "name=\"data\", shape=[-1, 3, -1, -1], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True)", "flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanFP16Static(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program):", "self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanTestFP16.DynamicShapeParam({ 'data': [1, 3, 16,", "name=\"data\", shape=[-1, 3, -1, -1], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean( data,", "PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanFP16Static(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data =", "flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program):", "TRTReduceMeanAllTest.DynamicShapeParam({ 'data': [1, 3, 56, 56] }, {'data': [3, 3,", "= True self.trt_parameters = TRTReduceMeanStaticFP16.TensorRTParam( 1 << 30, 32, 1,", "import paddle.fluid.core as core from paddle.fluid.core import PassVersionChecker from paddle.fluid.core", "self.dynamic_shape_params = TRTReduceMeanAllNoBatchTest.DynamicShapeParam( { 'data': [1, 3, 16, 16] },", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "56, 56] }, {'data': [3, 3, 56, 56]}, {'data': [3,", "License, Version 2.0 (the \"License\"); # you may not use", "core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanFP16Static(InferencePassTest):", "3, 16, 16] }, {'data': [3, 3, 56, 56]}, {'data':", "# You may obtain a copy of the License at", "TRTReduceMeanAllNoBatchTest.DynamicShapeParam( { 'data': [1, 3, 16, 16] }, {'data': [3,", "3, 56, 56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out =", "self.fetch_list = [out] def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True", "False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanTest.DynamicShapeParam({ 'data': [1, 3,", "AnalysisConfig.Precision.Half, False, False) self.fetch_list = [out] def test_check_output(self): if core.is_compiled_with_cuda():", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "False, False) self.fetch_list = [out] def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu", "PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data =", "Authors. All Rights Reserved. # # Licensed under the Apache", "use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestFP16(InferencePassTest): def", "paddle.fluid.core import AnalysisConfig class TRTReduceMeanTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program):", "1, AnalysisConfig.Precision.Half, False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanTestFP16.DynamicShapeParam({", "under the License. 
from __future__ import print_function import unittest import", "the License for the specific language governing permissions and #", "= True self.trt_parameters = TRTReduceMeanFP16Static.TensorRTParam( 1 << 30, 32, 1,", "Apache License, Version 2.0 (the \"License\"); # you may not", "def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\", shape=[4,", "AnalysisConfig.Precision.Half, True, False) self.fetch_list = [out] def test_check_output(self): if core.is_compiled_with_cuda():", "unittest import numpy as np from inference_pass_test import InferencePassTest import", "either express or implied. # See the License for the", "PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestStatic(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data =", "1, AnalysisConfig.Precision.Half, False, False) self.fetch_list = [out] def test_check_output(self): if", "fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\", shape=[-1, 3, -1, -1],", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "56]}, False) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu,", "= True self.trt_parameters = TRTReduceMeanAllTest.TensorRTParam( 1 << 30, 32, 1,", "License. from __future__ import print_function import unittest import numpy as", "dim=[2, -1], keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = {", "True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanFP16Static(InferencePassTest): def setUp(self): with", "30, 32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] def", "InferencePassTest import paddle.fluid as fluid import paddle.fluid.core as core from", "[1, 3, 56, 56] }, {'data': [3, 3, 56, 56]},", "True self.trt_parameters = TRTReduceMeanFP16Static.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half,", "56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanAllTest.TensorRTParam( 1 <<", "56, 56]).astype(\"float32\"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanTestFP16.TensorRTParam( 1", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "-1], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean( data, dim=[2, -1], keep_dim=True) out", "True self.trt_parameters = TRTReduceMeanStaticAllTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32,", "shape=[3, 3, 56, 56], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean( data, dim=[2,", "inference_pass_test import InferencePassTest import paddle.fluid as fluid import paddle.fluid.core as", "PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestFP16(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data =", "class TRTReduceMeanTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(", "= { \"data\": np.random.random([4, 3, 56, 56]).astype(\"float32\"), } self.enable_trt =", "False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanAllNoBatchTest.DynamicShapeParam( { 'data':", "= True self.trt_parameters = TRTReduceMeanTest.TensorRTParam( 1 << 30, 32, 1,", 
"= True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllNoBatchTest(InferencePassTest): def setUp(self):", "= True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestFP16(InferencePassTest): def setUp(self):", "True, False) self.fetch_list = [out] def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu", "\"License\"); # you may not use this file except in", "flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestFP16(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program):", "from __future__ import print_function import unittest import numpy as np", "= True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticFP16(InferencePassTest): def setUp(self):", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "= [out] self.dynamic_shape_params = TRTReduceMeanAllTest.DynamicShapeParam({ 'data': [1, 3, 56, 56]", "= fluid.data( name=\"data\", shape=[4, 3, 56, 56], dtype=\"float32\") reduce_mean =", "}, {'data': [3, 3, 56, 56]}, {'data': [3, 3, 56,", "self.dynamic_shape_params = TRTReduceMeanTest.DynamicShapeParam({ 'data': [1, 3, 16, 16] }, {'data':", "# distributed under the License is distributed on an \"AS", "16, 16] }, {'data': [3, 3, 56, 56]}, {'data': [3,", "dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds", "# Unless required by applicable law or agreed to in", "TRTReduceMeanAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\",", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "You may obtain a copy of the License at #", "False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanTestFP16.DynamicShapeParam({ 'data': [1, 3,", "flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program):", "as np from inference_pass_test import InferencePassTest import paddle.fluid as fluid", "is_test=True) self.feeds = { \"data\": np.random.random([3, 3, 56, 56]).astype(\"float32\"), }", "-1, -1], dtype=\"float32\") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean,", "flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestStatic(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program):", "the Apache License, Version 2.0 (the \"License\"); # you may", "setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name=\"data\", shape=[3, 3,", "if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) if", "fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { \"data\":" ]
[ "False, True, True), in_channels=512, position='after_conv2') ] ), neck=dict( type='RFP', rfp_steps=2,", "groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True),", "3, 6, 1), rfp_backbone=dict( rfp_inplanes=256, type='DetectoRS_ResNeXt', depth=101, groups=32, base_width=4, num_stages=4,", "model = dict( pretrained='open-mmlab://resnext101_32x4d', backbone=dict( type='DetectoRS_ResNeXt', pretrained='open-mmlab://resnext101_32x4d', depth=101, groups=32, base_width=4,", "_base_ = [ '../_base_/models/cascade_rcnn_r50_fpn.py', './dataset_base.py', './scheduler_base.py', '../_base_/default_runtime.py' ] model =", "stage_with_sac=(False, True, True, True), pretrained='open-mmlab://resnext101_32x4d', style='pytorch')), roi_head=dict( bbox_head=[ dict( type='Shared2FCBBoxHead',", "use_deform=True), stage_with_sac=(False, True, True, True), output_img=True, plugins=[ dict( cfg=dict( type='GeneralizedAttention',", "rfp_backbone=dict( rfp_inplanes=256, type='DetectoRS_ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2,", "use_deform=True), stage_with_sac=(False, True, True, True), pretrained='open-mmlab://resnext101_32x4d', style='pytorch')), roi_head=dict( bbox_head=[ dict(", "'./scheduler_base.py', '../_base_/default_runtime.py' ] model = dict( pretrained='open-mmlab://resnext101_32x4d', backbone=dict( type='DetectoRS_ResNeXt', pretrained='open-mmlab://resnext101_32x4d',", "'../_base_/default_runtime.py' ] model = dict( pretrained='open-mmlab://resnext101_32x4d', backbone=dict( type='DetectoRS_ResNeXt', pretrained='open-mmlab://resnext101_32x4d', depth=101,", "bbox_head=[ dict( type='Shared2FCBBoxHead', num_classes=14 ), dict( type='Shared2FCBBoxHead', num_classes=14 ), dict(", "type='Shared2FCBBoxHead', num_classes=14 ) ] ), test_cfg=dict( rpn=dict( nms_thr=0.7 ), rcnn=dict(", "position='after_conv2') ] ), neck=dict( type='RFP', rfp_steps=2, aspp_out_channels=64, aspp_dilations=(1, 3, 6,", "aspp_out_channels=64, aspp_dilations=(1, 3, 6, 1), rfp_backbone=dict( rfp_inplanes=256, type='DetectoRS_ResNeXt', depth=101, groups=32,", "roi_head=dict( bbox_head=[ dict( type='Shared2FCBBoxHead', num_classes=14 ), dict( type='Shared2FCBBoxHead', num_classes=14 ),", "True, True), in_channels=512, position='after_conv2') ] ), neck=dict( type='RFP', rfp_steps=2, aspp_out_channels=64,", "base_width=4, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True), output_img=True, plugins=[", "norm_eval=True, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True), pretrained='open-mmlab://resnext101_32x4d', style='pytorch')),", "type='Shared2FCBBoxHead', num_classes=14 ), dict( type='Shared2FCBBoxHead', num_classes=14 ), dict( type='Shared2FCBBoxHead', num_classes=14", "rfp_inplanes=256, type='DetectoRS_ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3),", "cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), stages=(False, False, True, True),", "plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), stages=(False, False,", "test_cfg=dict( rpn=dict( nms_thr=0.7 ), rcnn=dict( score_thr=0.0, nms=dict(type='nms', iou_threshold=0.4) ) )", "), dict( type='Shared2FCBBoxHead', num_classes=14 ) ] ), test_cfg=dict( rpn=dict( nms_thr=0.7", "True, True, True), 
output_img=True, plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8,", "), neck=dict( type='RFP', rfp_steps=2, aspp_out_channels=64, aspp_dilations=(1, 3, 6, 1), rfp_backbone=dict(", "num_classes=14 ), dict( type='Shared2FCBBoxHead', num_classes=14 ), dict( type='Shared2FCBBoxHead', num_classes=14 )", "base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True,", "norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True),", "rpn=dict( nms_thr=0.7 ), rcnn=dict( score_thr=0.0, nms=dict(type='nms', iou_threshold=0.4) ) ) )", "type='RFP', rfp_steps=2, aspp_out_channels=64, aspp_dilations=(1, 3, 6, 1), rfp_backbone=dict( rfp_inplanes=256, type='DetectoRS_ResNeXt',", "6, 1), rfp_backbone=dict( rfp_inplanes=256, type='DetectoRS_ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0,", "[ '../_base_/models/cascade_rcnn_r50_fpn.py', './dataset_base.py', './scheduler_base.py', '../_base_/default_runtime.py' ] model = dict( pretrained='open-mmlab://resnext101_32x4d',", "True), pretrained='open-mmlab://resnext101_32x4d', style='pytorch')), roi_head=dict( bbox_head=[ dict( type='Shared2FCBBoxHead', num_classes=14 ), dict(", "conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True), pretrained='open-mmlab://resnext101_32x4d', style='pytorch')), roi_head=dict(", "stages=(False, False, True, True), in_channels=512, position='after_conv2') ] ), neck=dict( type='RFP',", "dict( type='Shared2FCBBoxHead', num_classes=14 ), dict( type='Shared2FCBBoxHead', num_classes=14 ), dict( type='Shared2FCBBoxHead',", "dict( type='Shared2FCBBoxHead', num_classes=14 ) ] ), test_cfg=dict( rpn=dict( nms_thr=0.7 ),", "depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN',", "1), rfp_backbone=dict( rfp_inplanes=256, type='DetectoRS_ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1,", "groups=32, base_width=4, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True), output_img=True,", "= [ '../_base_/models/cascade_rcnn_r50_fpn.py', './dataset_base.py', './scheduler_base.py', '../_base_/default_runtime.py' ] model = dict(", "num_heads=8, attention_type='0010', kv_stride=2), stages=(False, False, True, True), in_channels=512, position='after_conv2') ]", ") ] ), test_cfg=dict( rpn=dict( nms_thr=0.7 ), rcnn=dict( score_thr=0.0, nms=dict(type='nms',", "in_channels=512, position='after_conv2') ] ), neck=dict( type='RFP', rfp_steps=2, aspp_out_channels=64, aspp_dilations=(1, 3,", "rfp_steps=2, aspp_out_channels=64, aspp_dilations=(1, 3, 6, 1), rfp_backbone=dict( rfp_inplanes=256, type='DetectoRS_ResNeXt', depth=101,", "dict( type='Shared2FCBBoxHead', num_classes=14 ), dict( type='Shared2FCBBoxHead', num_classes=14 ) ] ),", "= dict( pretrained='open-mmlab://resnext101_32x4d', backbone=dict( type='DetectoRS_ResNeXt', pretrained='open-mmlab://resnext101_32x4d', depth=101, groups=32, base_width=4, conv_cfg=dict(type='ConvAWS'),", "pretrained='open-mmlab://resnext101_32x4d', depth=101, groups=32, base_width=4, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True,", "1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, 
conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True),", "attention_type='0010', kv_stride=2), stages=(False, False, True, True), in_channels=512, position='after_conv2') ] ),", "True), in_channels=512, position='after_conv2') ] ), neck=dict( type='RFP', rfp_steps=2, aspp_out_channels=64, aspp_dilations=(1,", "True), output_img=True, plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2),", "sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True), output_img=True, plugins=[ dict( cfg=dict(", "aspp_dilations=(1, 3, 6, 1), rfp_backbone=dict( rfp_inplanes=256, type='DetectoRS_ResNeXt', depth=101, groups=32, base_width=4,", "sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True), pretrained='open-mmlab://resnext101_32x4d', style='pytorch')), roi_head=dict( bbox_head=[", "'./dataset_base.py', './scheduler_base.py', '../_base_/default_runtime.py' ] model = dict( pretrained='open-mmlab://resnext101_32x4d', backbone=dict( type='DetectoRS_ResNeXt',", "backbone=dict( type='DetectoRS_ResNeXt', pretrained='open-mmlab://resnext101_32x4d', depth=101, groups=32, base_width=4, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False,", "conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True), output_img=True, plugins=[ dict(", "dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), stages=(False, False, True,", "num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, conv_cfg=dict(type='ConvAWS'),", "neck=dict( type='RFP', rfp_steps=2, aspp_out_channels=64, aspp_dilations=(1, 3, 6, 1), rfp_backbone=dict( rfp_inplanes=256,", "2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False,", "True, True), output_img=True, plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010',", "depth=101, groups=32, base_width=4, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True),", "output_img=True, plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), stages=(False,", "dict( pretrained='open-mmlab://resnext101_32x4d', backbone=dict( type='DetectoRS_ResNeXt', pretrained='open-mmlab://resnext101_32x4d', depth=101, groups=32, base_width=4, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC',", "stage_with_sac=(False, True, True, True), output_img=True, plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1,", "style='pytorch')), roi_head=dict( bbox_head=[ dict( type='Shared2FCBBoxHead', num_classes=14 ), dict( type='Shared2FCBBoxHead', num_classes=14", "type='DetectoRS_ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1,", "num_classes=14 ) ] ), test_cfg=dict( rpn=dict( nms_thr=0.7 ), rcnn=dict( score_thr=0.0,", "pretrained='open-mmlab://resnext101_32x4d', style='pytorch')), roi_head=dict( bbox_head=[ dict( type='Shared2FCBBoxHead', num_classes=14 ), dict( type='Shared2FCBBoxHead',", "] ), neck=dict( type='RFP', rfp_steps=2, aspp_out_channels=64, aspp_dilations=(1, 3, 6, 1),", "] ), test_cfg=dict( rpn=dict( nms_thr=0.7 ), 
rcnn=dict( score_thr=0.0, nms=dict(type='nms', iou_threshold=0.4)", "requires_grad=True), norm_eval=True, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True), pretrained='open-mmlab://resnext101_32x4d',", "pretrained='open-mmlab://resnext101_32x4d', backbone=dict( type='DetectoRS_ResNeXt', pretrained='open-mmlab://resnext101_32x4d', depth=101, groups=32, base_width=4, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True),", "type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), stages=(False, False, True, True), in_channels=512,", "True, True), pretrained='open-mmlab://resnext101_32x4d', style='pytorch')), roi_head=dict( bbox_head=[ dict( type='Shared2FCBBoxHead', num_classes=14 ),", "type='Shared2FCBBoxHead', num_classes=14 ), dict( type='Shared2FCBBoxHead', num_classes=14 ) ] ), test_cfg=dict(", "frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True,", "num_classes=14 ), dict( type='Shared2FCBBoxHead', num_classes=14 ) ] ), test_cfg=dict( rpn=dict(", "kv_stride=2), stages=(False, False, True, True), in_channels=512, position='after_conv2') ] ), neck=dict(", "'../_base_/models/cascade_rcnn_r50_fpn.py', './dataset_base.py', './scheduler_base.py', '../_base_/default_runtime.py' ] model = dict( pretrained='open-mmlab://resnext101_32x4d', backbone=dict(", "), dict( type='Shared2FCBBoxHead', num_classes=14 ), dict( type='Shared2FCBBoxHead', num_classes=14 ) ]", "out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC',", "type='DetectoRS_ResNeXt', pretrained='open-mmlab://resnext101_32x4d', depth=101, groups=32, base_width=4, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True,", "spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), stages=(False, False, True, True), in_channels=512, position='after_conv2')", "3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True,", "] model = dict( pretrained='open-mmlab://resnext101_32x4d', backbone=dict( type='DetectoRS_ResNeXt', pretrained='open-mmlab://resnext101_32x4d', depth=101, groups=32,", "), test_cfg=dict( rpn=dict( nms_thr=0.7 ), rcnn=dict( score_thr=0.0, nms=dict(type='nms', iou_threshold=0.4) )", "True, True, True), pretrained='open-mmlab://resnext101_32x4d', style='pytorch')), roi_head=dict( bbox_head=[ dict( type='Shared2FCBBoxHead', num_classes=14" ]
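A config like this is normally consumed through the MMDetection config loader rather than imported directly. The sketch below is only a usage note under stated assumptions: an MMDetection 2.x environment where the _base_ files referenced above exist, with the config saved under the hypothetical name detectors_cascade_rcnn_x101_32x4d.py.

from mmcv import Config

# fromfile() merges the _base_ chain first, then applies the overrides above.
cfg = Config.fromfile('detectors_cascade_rcnn_x101_32x4d.py')
print(cfg.model.backbone.type)            # DetectoRS_ResNeXt
print(cfg.model.neck.rfp_steps)           # 2
print(cfg.model.test_cfg.rcnn.score_thr)  # 0.0

Training would then go through the usual entry point, e.g. python tools/train.py detectors_cascade_rcnn_x101_32x4d.py.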
[ "_validate_input(self.EmptyDeeplyNested, None, None, None) self.assertEqual(num_points, 1) self.assertEqual(num_samples, 1) def test_validate_input_empty_point(self):", "< 1.4.0 creates two Line2D instances, mpl 1.4.0 creates one,", "2, 3], '^', 0.77, 1, 1.5, 'stdv') self.assertEqual(result.get_sizes(), 20) def", "[\"Infants\", \"Children\", \"Teens\"], ['b', 'r', 'g'], \"x-axis label\", \"y-axis label\",", "32, 6, 8], [5, 4, 8, 13], [1, 1, 2]],", "3) for patch in ax.patches: npt.assert_almost_equal( patch.get_facecolor(), (1.0, 0.7529411764705882, 0.796078431372549,", "self.assertAlmostEqual(result[0].get_width(), 0.5) self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) fig,", "legend=('foo', 'bar', 'baz')) def test_color_box_plot(self): fig, ax = plt.subplots() box_plot", "0.5, False) np.testing.assert_allclose(ticks, [1.25, 5.25, 9.25, 11.25]) ticks = _calc_data_point_ticks(np.array([0]),", "from skbio.draw import boxplots, grouped_distributions from skbio.draw._distributions import ( _calc_data_point_locations,", "test_plot_bar_data_empty(self): fig, ax = plt.subplots() result = _plot_bar_data(ax, [], 'red',", "plt.subplots() result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'sem')", "self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) # second distribution (empty) should have", "colors are None. fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData)", "'red', 0.5, 3.75, 1.5, 'sem') self.assertTrue(result is None) def test_plot_scatter_data(self):", "data to be plotted by the boxplot function. self.ValidTypicalBoxData =", "label\", x_tick_labels=[\"T0\", \"T1\", \"T2\"], y_min='car', y_max=30) def test_set_axes_options_invalid_x_tick_labels_orientation(self): fig, ax", "= ('box', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\", \"T2\",", "fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, ['blue',", "test_set_axes_options(self): fig, ax = plt.subplots() _set_axes_options(ax, \"Plot Title\", \"x-axis label\",", "should have a nan for its y # value lines", "identical between the two versions. # see: # https://github.com/pydata/pandas/issues/8382#issuecomment-56840974 #", "(1.0, 0.0, 0.0, 1.0)) self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0)) #", "[2.3, 4, 5, 88, 9, 10, 11, 1, 0, 3,", "test_boxplots_empty_distributions(self): fig = boxplots([[1, 2, 3], [], [4, 5, 6]],", "1.0, 1.0, 1.0))) self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0))) self.assertTrue(_is_single_matplotlib_color((2.0, 1, 1.0))) self.assertFalse(_is_single_matplotlib_color(['w',", "1.0)) self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0)) self.assertTrue(np.isnan(ax.patches[0].xy[0][1])) self.assertFalse(np.isnan(ax.patches[1].xy[0][1])) self.assertTrue(np.isnan(ax.patches[2].xy[0][1])) def", "self.assertTrue(_is_single_matplotlib_color('w')) self.assertTrue(_is_single_matplotlib_color('white')) self.assertTrue(_is_single_matplotlib_color([1, 1, 1])) self.assertTrue(_is_single_matplotlib_color([1, 1, 1, 1])) self.assertTrue(_is_single_matplotlib_color((1,", "data list. 
self.Null = None # Test empty data list.", "None, 0), []) self.assertEqual(_get_distribution_markers('symbols', ['^'], 0), []) def test_get_distribution_markers_negative_num_markers(self): with", "'barbarbar'], x_tick_labels_orientation='vertical') orig_fig_size = fig.get_size_inches() _set_figure_size(fig, -1, 0) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size))", "5), ['^', '>', '<', '^', '>']) def test_get_distribution_markers_bad_marker_type(self): with npt.assert_raises(ValueError):", "# second distribution (empty) should have nans since it is", "args = ('box', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\",", "with npt.assert_raises(ValueError): boxplots([[1, 'foo', 3]]) # Number of colors doesn't", "list. self.EmptyNested = [[]] # Test nested empty data list", "(1.0, 1.0, 0.0, 1.0)) # patch location should include at", "empty data list (for bar/scatter plots). self.EmptyDeeplyNested = [[[]]] #", "\"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['b', 'r'], \"x-axis label\", \"y-axis", "1.0, 0.0, 1.0)) # patch location should include at least", "\"y-axis label\", x_tick_labels=[\"T0\", \"T1\"], x_tick_labels_orientation='brofist') def test_create_legend(self): fig, ax =", "this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function", "1, 1, 1))) self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0, 1.0))) self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0)))", "down from 3..12 to 1..4. locs = _calc_data_point_locations(4, [3, 4,", "plt.subplots() _create_legend(ax, ['b', 'r'], ['dist1', 'dist2'], 'colors') self.assertEqual(len(ax.get_legend().get_texts()), 2) fig,", "np.array([1, 1.33333333, 3.33333333, 4])) # Sorted order shouldn't affect scaling.", "[] # Test nested empty data list. self.EmptyNested = [[]]", "x_tick_labels_orientation='vertical') npt.assert_warns(RuntimeWarning, _set_figure_size, fig, 3, 3) npt.assert_array_equal(fig.get_size_inches(), (3, 3)) if", "self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.1125, 2.0125, 3.8125, 4.1125])", "test_get_distribution_markers_insufficient_markers(self): self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'colors', None, 10), ['b', 'g', 'r', 'c',", "we don't clean up our figures. plt.close('all') def test_validate_input_null(self): with", "def test_validate_input_invalid_sample_names(self): with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, None, [\"Men\", \"Women\"]) def", "Test null data list. self.Null = None # Test empty", "All colors are None. fig, ax = plt.subplots() box_plot =", "[], [4, 5, 6]], box_colors=['blue', 'red']) # Invalid legend. 
with", "-1) def test_plot_bar_data(self): fig, ax = plt.subplots() result = _plot_bar_data(ax,", "'stdv') self.assertEqual(result[0].__class__.__name__, \"Rectangle\") self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].get_width(), 0.5) self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0,", "1.0)) self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0,", "[\"T0\", \"T1\"], None) def test_validate_input_invalid_sample_names(self): with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, None,", "1, 1.5, 'stdv') self.assertEqual(result.get_sizes(), 20) def test_plot_scatter_data_empty(self): fig, ax =", "label\", \"y-axis label\", \"Test\") npt.assert_warns(RuntimeWarning, grouped_distributions, *args) def test_grouped_distributions_scatter(self): fig", "test_validate_x_values_invalid_x_values(self): with npt.assert_raises(ValueError): _validate_x_values([1, 2, 3, 4], [\"T0\", \"T1\", \"T2\"],", "self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'symbols', ['^', '>', '<'], 5), ['^', '>', '<',", "4]) def test_calc_data_point_locations_custom_spacing(self): # Scaling down from 3..12 to 1..4.", "boxplots([[1, 2, 3]], legend=('foo', 'bar', 'baz')) def test_color_box_plot(self): fig, ax", "# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. #", "Title\", \"x-axis label\", \"y-axis label\", x_tick_labels=[\"T0\", \"T1\", \"T2\"], y_min='car', y_max=30)", "10, 12]) np.testing.assert_allclose(locs, np.array([1, 1.33333333, 3.33333333, 4])) # Sorted order", "10, 33, 32, 6, 7, 8]]] # Test valid data", "[1, 2, 3], 'red', 0.5, 3.75, 1.5, 'stdv') self.assertEqual(result[0].__class__.__name__, \"Rectangle\")", "'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') orig_fig_size = fig.get_size_inches() _set_figure_size(fig, -1, 0)", "test_get_distribution_markers_bad_marker_type(self): with npt.assert_raises(ValueError): _get_distribution_markers('shapes', [], 3) def test_get_distribution_markers_zero_markers(self): self.assertEqual(_get_distribution_markers('symbols', None,", "# see: # https://github.com/pydata/pandas/issues/8382#issuecomment-56840974 # https://github.com/matplotlib/matplotlib/issues/3544 self.assertTrue(len(result['fliers']) == 1 or", "_validate_input(self.ValidSingleSampleData, None, [\"T0\", \"T1\"], None) def test_validate_input_invalid_sample_names(self): with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData,", "distributions. 
fig = boxplots([[], [], []], box_colors=['blue', 'red', 'yellow']) ax", "or len(result['fliers']) == 2) self.assertEqual(len(result['caps']), 2) def test_plot_box_data_empty(self): fig, ax", "def test_validate_input_empty_nested(self): with npt.assert_raises(ValueError): _validate_input(self.EmptyNested, None, None, None) def test_validate_input_empty_deeply_nested(self):", "[1, 2, 3], 'red', 0.5, 3.75, 1.5, 'var') def test_plot_bar_data_empty(self):", "fig, ax = plt.subplots() result = _plot_bar_data(ax, [], 'red', 0.5,", "3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) def test_boxplots_empty_distributions(self): fig = boxplots([[1,", "'r'], ['dist1', 'dist2'], 'colors') self.assertEqual(len(ax.get_legend().get_texts()), 2) fig, ax = plt.subplots()", "its y # value lines = ax.get_lines() self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) # line", "# Test valid data with one sample (for bar/scatter plots).", "test_validate_input_empty(self): with npt.assert_raises(ValueError): _validate_input(self.Empty, None, None, None) def test_validate_input_empty_nested(self): with", "1, 0, 3, -8], [2, 9, 7, 5, 6]] def", "1.0, 1.0)) self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0,", "up our figures. plt.close('all') def test_validate_input_null(self): with npt.assert_raises(ValueError): _validate_input(self.Null, None,", "it is hidden. # boxplots in mpl < 1.4.0 have", "_plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'var') def", "ax = plt.subplots() with npt.assert_raises(ValueError): _plot_bar_data(ax, [1, 2, 3], 'red',", "'^', 0.77, 1, 1.5, 'stdv') self.assertTrue(result is None) def test_plot_box_data(self):", "Test valid data with three samples and four data points", "'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') _set_figure_size(fig, 3, 4) self.assertTrue(np.array_equal(fig.get_size_inches(), (3, 4)))", "npt.assert_raises(ValueError): _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'], 'foo') def", "\"Plot Title\") self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(ax.get_xticklabels()[0].get_text(), '42') self.assertEqual(ax.get_xticklabels()[1].get_text(), '45') self.assertEqual(ax.get_xticklabels()[2].get_text(),", "\"y-axis label\", x_tick_labels=[\"T0\", \"T1\", \"T2\"], y_min='car', y_max=30) def test_set_axes_options_invalid_x_tick_labels_orientation(self): fig,", "# though the resulting plot looks identical between the two", "\"Teens\"], [], \"x-axis label\", \"y-axis label\", \"Test\") def test_grouped_distributions_box(self): fig", "1.0)) self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0)) # patch location should", "*args) def test_grouped_distributions_scatter(self): fig = grouped_distributions('scatter', self.ValidTypicalData, [1, 4, 10,", "plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, ['blue', 'w', (1, 1,", "4) np.testing.assert_allclose(ax.get_xticks(), [1.1125, 2.0125, 3.8125, 4.1125]) def test_grouped_distributions_insufficient_colors(self): args =", "'r'], \"x-axis label\", \"y-axis label\", \"Test\") npt.assert_warns(RuntimeWarning, grouped_distributions, *args) def", "x_tick_labels=[\"T0\", \"T1\", \"T2\"], y_min=0, y_max=1) self.assertEqual(ax.get_title(), \"Plot Title\") self.assertEqual(ax.get_ylabel(), \"y-axis", "['^', '<', '>'], ['dist1', 'dist2', 'dist3'], 'symbols') self.assertEqual(len(ax.get_legend().get_texts()), 3) def", 
"test_boxplots(self): fig = boxplots(self.ValidTypicalBoxData, [1, 4, 10], [\"Data 1\", \"Data", "[\"T0\", \"T1\", \"T2\"], len(self.ValidSingleSampleData)) def test_validate_x_values_invalid_x_tick_labels(self): with npt.assert_raises(ValueError): _validate_x_values(None, [\"T0\"],", "we specified self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0)) self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0,", "'r', 'c']) def test_get_distribution_markers_insufficient_markers(self): self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'colors', None, 10), ['b',", "with npt.assert_raises(ValueError): _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2'], 'symbols') with", "test_get_distribution_markers_null_marker_list(self): self.assertEqual(_get_distribution_markers('colors', None, 5), ['b', 'g', 'r', 'c', 'm']) def", "resulting plot looks identical between the two versions. # see:", "def test_validate_input_invalid_data_point_names(self): with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, [\"T0\", \"T1\"], None) def", "in ax.patches: self.assertTrue(np.isnan(patch.xy[0][1])) fig = boxplots([[], [], []], box_colors='pink') ax", "self.assertFalse(_is_single_matplotlib_color(['w', 'r'])) self.assertFalse(_is_single_matplotlib_color(['w'])) self.assertFalse(_is_single_matplotlib_color(('w',))) self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),))) self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),", "= boxplots([[], [], []], box_colors=['blue', 'red', 'yellow']) ax = fig.get_axes()[0]", "_calc_data_point_locations(4) np.testing.assert_allclose(locs, [1, 2, 3, 4]) def test_calc_data_point_locations_custom_spacing(self): # Scaling", "(for bar/scatter plots). self.ValidTypicalData = [[[1.0, 2, 3.5, 5], [2,", "7, 8]], [[4, 7, 10, 33, 32, 6, 7, 8]]]", "ax = plt.subplots() result = _plot_box_data(ax, [0, 0, 7, 8,", "TestCase, main import numpy as np import numpy.testing as npt", "2.0) fig, ax = plt.subplots() result = _plot_bar_data(ax, [1, 2,", "with npt.assert_raises(ValueError): _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5,", "creates one, # though the resulting plot looks identical between", "3], [4, 5]], []], None, None, None) def test_validate_input_invalid_num_samples(self): with", "2.0125, 3.8125, 4.1125]) def test_grouped_distributions_insufficient_colors(self): args = ('bar', self.ValidTypicalData, [1,", "33, 32, 6, 7, 8]]] # Test valid data with", "'red', 'yellow']) ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) # patch colors", "3) self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0)) self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0,", "\"y-axis label\") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), \"Test\") self.assertEqual(ax.get_xlabel(), \"x-axis label\")", "'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') orig_fig_size = fig.get_size_inches() _set_figure_size(fig) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size))", "\"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['b', 'r', 'g'], \"x-axis", "(1, 1, 0.9)]) # All colors are None. 
fig, ax", "7, 8]]] # Test valid data with three samples and", "= fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) # patch colors should match what", "empty, and thus hidden for patch in ax.patches: self.assertTrue(np.isnan(patch.xy[0][1])) fig", "9, 7, 5, 6]] def tearDown(self): # We get a", "'symbols') with npt.assert_raises(ValueError): _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'],", "import boxplots, grouped_distributions from skbio.draw._distributions import ( _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot,", "1 or len(result['fliers']) == 2) self.assertEqual(len(result['caps']), 2) def test_plot_box_data_empty(self): fig,", "data with one sample (for bar/scatter plots). self.ValidSingleSampleData = [[[1,", "npt.assert_raises(ValueError): _validate_input(self.EmptyNested, None, None, None) def test_validate_input_empty_deeply_nested(self): num_points, num_samples =", "num_samples = _validate_input(self.EmptyDeeplyNested, None, None, None) self.assertEqual(num_points, 1) self.assertEqual(num_samples, 1)", "4, 8], [\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"]), (4,", "numpy as np import numpy.testing as npt import matplotlib.pyplot as", "the two versions. # see: # https://github.com/pydata/pandas/issues/8382#issuecomment-56840974 # https://github.com/matplotlib/matplotlib/issues/3544 self.assertTrue(len(result['fliers'])", "for its y # value lines = ax.get_lines() self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) #", "2\", \"Data 3\"], \"Test\", \"x-axis label\", \"y-axis label\", legend=(('blue', 'red'),", "['b', 'g', 'r', 'c', 'm', 'y', 'w', 'b', 'g', 'r'])", "= boxplots([[], [1, 2, 3.5], []], box_colors=['blue', 'red', 'yellow']) ax", "6, 7, 8]]] # Test valid data with three samples", "[[3.4, 10, 11.67, 12.0, 2, 2, 99.99], [2.3, 4, 5,", "self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),))) self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1), (0.9, 0.9)))) def test_set_figure_size(self):", "self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.075, 1.975, 3.775, 4.075]) def test_grouped_distributions_insufficient_symbols(self): args", "'stdv') self.assertTrue(result is None) def test_plot_box_data(self): fig, ax = plt.subplots()", "(c) 2013--, scikit-bio development team. # # Distributed under the", "'symbols') self.assertEqual(len(ax.get_legend().get_texts()), 3) def test_create_legend_invalid_input(self): fig, ax = plt.subplots() with", "\"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['b', 'r', 'g'], \"x-axis label\",", "5], [2, 3, 5, 6], [2, 3, 8]], [[4, 7,", "test_validate_x_values_nonnumber_x_values(self): with npt.assert_raises(ValueError): _validate_x_values([\"foo\", 2, 3], None, len(self.ValidSingleSampleData)) def test_validate_x_values_valid_x_values(self):", "Line2D instances, mpl 1.4.0 creates one, # though the resulting", "_set_figure_size(fig, -1, 0) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def test_set_figure_size_long_labels(self): fig, ax =", "'red', 0.5, 3.75, 1.5, 'stdv') self.assertTrue(result is None) fig, ax", "samples in data list (for bar/scatter plots). 
self.InvalidNumSamples = [[[1,", "99.99], [2.3, 4, 5, 88, 9, 10, 11, 1, 0,", "2, 3], 'red', 0.5, 3.75, 1.5, 'sem') self.assertEqual(result[0].__class__.__name__, \"Rectangle\") self.assertEqual(len(result),", "\"T0\") self.assertEqual(ax.get_xticklabels()[1].get_text(), \"T1\") def test_set_axes_options_ylim(self): fig, ax = plt.subplots() _set_axes_options(ax,", "test_plot_box_data(self): fig, ax = plt.subplots() result = _plot_box_data(ax, [0, 0,", "label\") self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) lines = ax.get_lines()", "None) def test_plot_box_data(self): fig, ax = plt.subplots() result = _plot_box_data(ax,", "y_min='car', y_max=30) def test_set_axes_options_invalid_x_tick_labels_orientation(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError):", "_validate_x_values) class DistributionsTests(TestCase): def setUp(self): # Test null data list.", "[\"Infants\", \"Children\", \"Teens\"], ['^', '>', '<'], \"x-axis label\", \"y-axis label\",", "\"x-axis label\", \"y-axis label\", \"Test\") def test_grouped_distributions_negative_distribution_width(self): args = ('box',", "3.775, 4.075]) def test_grouped_distributions_error(self): with npt.assert_raises(ValueError): grouped_distributions('pie', self.ValidTypicalData, [1, 4,", "9, 10, 11, 1, 0, 3, -8], [2, 9, 7,", "1.0)) self.assertTrue(np.isnan(patch.xy[0][1])) # Coloring works with some empty distributions. fig", "test_plot_scatter_data(self): fig, ax = plt.subplots() result = _plot_scatter_data(ax, [1, 2,", "test_get_distribution_markers_zero_markers(self): self.assertEqual(_get_distribution_markers('symbols', None, 0), []) self.assertEqual(_get_distribution_markers('symbols', ['^'], 0), []) def", "0.0, 0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) fig, ax = plt.subplots() result", "self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\",", "\"y-axis label\") self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.1125, 2.0125, 3.8125, 4.1125]) def", "one nan since the distribution # is empty, and thus", "\"Teens\"], ['b', 'g', 'y'], \"x-axis label\", \"y-axis label\", \"Test\") with", "= plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError): _color_box_plot(ax, box_plot, ['red', 'foobarbaz', 'blue']) #", "= boxplots([[1, 2, 3], [], [4, 5, 6]], [1, 4,", "plots). 
self.EmptyDeeplyNested = [[[]]] # Test invalid number of samples", "with npt.assert_raises(ValueError): _validate_input(self.InvalidNumSamples, None, None, None) def test_validate_input_invalid_data_point_names(self): with npt.assert_raises(ValueError):", "1) self.assertEqual(len(result['whiskers']), 2) # mpl < 1.4.0 creates two Line2D", "self.assertEqual(ax.get_xticklabels()[0].get_text(), '42') self.assertEqual(ax.get_xticklabels()[1].get_text(), '45') self.assertEqual(ax.get_xticklabels()[2].get_text(), '800') def test_set_axes_options_bad_ylim(self): fig, ax", "[1.25, 5.25, 9.25, 11.25]) ticks = _calc_data_point_ticks(np.array([0]), 3, 0.5, False)", "= boxplots(self.ValidTypicalBoxData, [1, 4, 10], [\"Data 1\", \"Data 2\", \"Data", "in ax.patches: npt.assert_almost_equal( patch.get_facecolor(), (1.0, 0.7529411764705882, 0.796078431372549, 1.0)) self.assertTrue(np.isnan(patch.xy[0][1])) #", "None, None) def test_validate_input_empty_nested(self): with npt.assert_raises(ValueError): _validate_input(self.EmptyNested, None, None, None)", "box_colors=['blue', 'red', 'yellow']) ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) # patch", "with some empty distributions. fig = boxplots([[], [1, 2, 3.5],", "1.0))) self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0))) self.assertTrue(_is_single_matplotlib_color((2.0, 1, 1.0))) self.assertFalse(_is_single_matplotlib_color(['w', 'r'])) self.assertFalse(_is_single_matplotlib_color(['w']))", "= fig.get_size_inches() _set_figure_size(fig) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def test_set_figure_size_invalid(self): fig, ax =", "# line in first distribution should *not* have nan for", "[1, 10.5]) def test_calc_data_point_locations_default_spacing(self): locs = _calc_data_point_locations(4) np.testing.assert_allclose(locs, [1, 2,", "\"T3\"], [\"Infants\", \"Children\", \"Teens\"]), (4, 3)) def test_validate_x_values_invalid_x_values(self): with npt.assert_raises(ValueError):", "npt.assert_raises(ValueError): _get_distribution_markers('symbols', [], -1) def test_plot_bar_data(self): fig, ax = plt.subplots()", "0.5) self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) def test_plot_bar_data_bad_error_bar_type(self):", "4, 10])) lines = ax.get_lines() self.assertTrue(np.isnan(lines[0].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[16].get_xydata()[0][1])) def test_boxplots_box_colors(self):", "6, 7, 8]]] # Test typical data to be plotted", "def test_set_figure_size_long_labels(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',", "Invalid legend. 
with npt.assert_raises(ValueError): boxplots([[1, 2, 3]], legend=('foo', 'bar', 'baz'))", "'>']) def test_get_distribution_markers_bad_marker_type(self): with npt.assert_raises(ValueError): _get_distribution_markers('shapes', [], 3) def test_get_distribution_markers_zero_markers(self):", "[1, 4, 10, 11], [\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\",", "\"Women\"]) def test_validate_input_all_valid_input(self): self.assertEqual(_validate_input(self.ValidTypicalData, [1, 3, 4, 8], [\"T0\", \"T1\",", "with npt.assert_raises(ValueError): _get_distribution_markers('shapes', [], 3) def test_get_distribution_markers_zero_markers(self): self.assertEqual(_get_distribution_markers('symbols', None, 0),", "self.assertEqual(_get_distribution_markers('symbols', ['^'], 0), []) def test_get_distribution_markers_negative_num_markers(self): with npt.assert_raises(ValueError): _get_distribution_markers('symbols', [],", "[\"Infants\", \"Children\", \"Teens\"], [], \"x-axis label\", \"y-axis label\", \"Test\") def", "3, 3) npt.assert_array_equal(fig.get_size_inches(), (3, 3)) if __name__ == '__main__': main()", "\"y-axis label\", \"Test\") def test_grouped_distributions_negative_distribution_width(self): args = ('box', self.ValidTypicalData, [1,", "'barbarbar'], x_tick_labels_orientation='vertical') npt.assert_warns(RuntimeWarning, _set_figure_size, fig, 3, 3) npt.assert_array_equal(fig.get_size_inches(), (3, 3))", "'red'), ('foo', 'bar'))) ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), \"Test\") self.assertEqual(ax.get_xlabel(), \"x-axis", "None, None) def test_validate_input_invalid_data_point_names(self): with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, [\"T0\", \"T1\"],", "# Test nested empty data list (for bar/scatter plots). self.EmptyDeeplyNested", "'barbarbar'], x_tick_labels_orientation='vertical') _set_figure_size(fig, 3, 4) self.assertTrue(np.array_equal(fig.get_size_inches(), (3, 4))) def test_set_figure_size_defaults(self):", "from mpl if we don't clean up our figures. plt.close('all')", "None) def test_validate_input_invalid_data_point_names(self): with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, [\"T0\", \"T1\"], None)", "'y'], \"x-axis label\", \"y-axis label\", \"Test\") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(),", "\"T1\", \"T2\"], y_min=0, y_max=1) self.assertEqual(ax.get_title(), \"Plot Title\") self.assertEqual(ax.get_ylabel(), \"y-axis label\")", "plots). self.InvalidNumSamples = [[[1, 2, 3, 4, 5]], [[4, 5,", "fig, ax = plt.subplots() result = _plot_box_data(ax, [0, 0, 7,", "mpl 1.4.0 has # 7. 
in either case, the line", "None, 5), ['b', 'g', 'r', 'c', 'm']) def test_get_distribution_markers_empty_marker_list(self): self.assertEqual(_get_distribution_markers('colors',", "[4, 3, 12, 10]) np.testing.assert_allclose(locs, np.array([1.33333333, 1, 4, 3.33333333])) #", "'m']) def test_get_distribution_markers_empty_marker_list(self): self.assertEqual(_get_distribution_markers('colors', None, 4), ['b', 'g', 'r', 'c'])", "2, 3], [4, 5]], []], None, None, None) def test_validate_input_invalid_num_samples(self):", "5), ['b', 'g', 'r', 'c', 'm']) def test_get_distribution_markers_empty_marker_list(self): self.assertEqual(_get_distribution_markers('colors', None,", "test_is_single_matplotlib_color(self): self.assertTrue(_is_single_matplotlib_color('w')) self.assertTrue(_is_single_matplotlib_color('white')) self.assertTrue(_is_single_matplotlib_color([1, 1, 1])) self.assertTrue(_is_single_matplotlib_color([1, 1, 1, 1]))", "Invalid color. fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) with", "0.87]) np.testing.assert_allclose(locs, np.array([1, 1.58296893, 3])) def test_calc_data_point_ticks(self): ticks = _calc_data_point_ticks(np.array([1,", "see: # https://github.com/pydata/pandas/issues/8382#issuecomment-56840974 # https://github.com/matplotlib/matplotlib/issues/3544 self.assertTrue(len(result['fliers']) == 1 or len(result['fliers'])", "boxplots, grouped_distributions from skbio.draw._distributions import ( _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend,", "line at index 8 should have a nan for its", "11], [\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['^'], \"x-axis", "have 8 lines per boxplot, while mpl 1.4.0 has #", "result = _plot_scatter_data(ax, [], '^', 0.77, 1, 1.5, 'stdv') self.assertTrue(result", "'yellow']) ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0,", "'<', '>'], ['dist1', 'dist2', 'dist3'], 'foo') def test_grouped_distributions_bar(self): fig =", "have nan for its y value self.assertFalse(np.isnan(lines[0].get_xydata()[0][1])) # All distributions", "def test_get_distribution_markers_null_marker_list(self): self.assertEqual(_get_distribution_markers('colors', None, 5), ['b', 'g', 'r', 'c', 'm'])", "[], []], [1, 4, 10], [\"Data 1\", \"Data 2\", \"Data", "from 3..12 to 1..4. locs = _calc_data_point_locations(4, [3, 4, 10,", "of colors doesn't match number of distributions. with npt.assert_raises(ValueError): boxplots([[1,", "fig, 3, 3) npt.assert_array_equal(fig.get_size_inches(), (3, 3)) if __name__ == '__main__':", "box_plot, ['blue', None, (1, 1, 0.9)]) # All colors are", "def test_set_figure_size_defaults(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',", "npt.assert_raises(ValueError): grouped_distributions('pie', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\", \"T2\",", "\"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['^', '>', '<'], \"x-axis label\", \"y-axis", "Non-numeric entries in distribution. 
with npt.assert_raises(ValueError): boxplots([[1, 'foo', 3]]) #", "8]], [[4, 7, 10, 33, 32, 6, 7, 8]]] #", "3.75, 1.5, 'stdv') self.assertEqual(result[0].__class__.__name__, \"Rectangle\") self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].get_width(), 0.5) self.assertAlmostEqual(result[0].get_facecolor(),", "\"Test\", \"x-axis label\", \"y-axis label\", legend=(('blue', 'red'), ('foo', 'bar'))) ax", "self.assertAlmostEqual(result[0].get_height(), 2.0) def test_plot_bar_data_bad_error_bar_type(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError):", "np.testing.assert_allclose(ax.get_xticks(), [1.075, 1.975, 3.775, 4.075]) def test_grouped_distributions_insufficient_symbols(self): args = ('scatter',", "(for bar/scatter plots). self.InvalidNumSamples = [[[1, 2, 3, 4, 5]],", "'45') self.assertEqual(ax.get_xticklabels()[2].get_text(), '800') def test_set_axes_options_bad_ylim(self): fig, ax = plt.subplots() with", "'foobarbaz', 'blue']) # Wrong number of colors. fig, ax =", "under the terms of the Modified BSD License. # #", "x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') orig_fig_size = fig.get_size_inches() _set_figure_size(fig) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def", "is hidden. # boxplots in mpl < 1.4.0 have 8", "self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(ax.get_xticklabels()[0].get_text(), \"T0\") self.assertEqual(ax.get_xticklabels()[1].get_text(), \"T1\") def test_set_axes_options_ylim(self): fig,", "# ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function from unittest", "def test_validate_x_values_nonnumber_x_values(self): with npt.assert_raises(ValueError): _validate_x_values([\"foo\", 2, 3], None, len(self.ValidSingleSampleData)) def", "10]) np.testing.assert_allclose(locs, np.array([1.33333333, 1, 4, 3.33333333])) # Scaling up from", "self.ValidTypicalData = [[[1.0, 2, 3.5, 5], [2, 3, 5, 6],", "def test_validate_input_invalid_num_samples(self): with npt.assert_raises(ValueError): _validate_input(self.InvalidNumSamples, None, None, None) def test_validate_input_invalid_data_point_names(self):", "'stdv') self.assertEqual(result.__class__.__name__, \"dict\") self.assertEqual(len(result['boxes']), 1) self.assertEqual(len(result['medians']), 1) self.assertEqual(len(result['whiskers']), 2) #", "label\") self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.1125, 2.0125, 3.8125, 4.1125]) def test_grouped_distributions_insufficient_colors(self):", "'dist3'], 'foo') def test_grouped_distributions_bar(self): fig = grouped_distributions('bar', self.ValidTypicalData, [1, 4,", "Test nested empty data list. self.EmptyNested = [[]] # Test", "ax.patches: self.assertTrue(np.isnan(patch.xy[0][1])) fig = boxplots([[], [], []], box_colors='pink') ax =", "label\", \"y-axis label\", x_tick_labels=[\"T0\", \"T1\"], x_tick_labels_orientation='brofist') def test_create_legend(self): fig, ax", "entries in distribution. with npt.assert_raises(ValueError): boxplots([[1, 'foo', 3]]) # Number", "= plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, ['blue', 'w', (1, 1, 0.9)]) #", "None, None, None) def test_validate_input_empty_deeply_nested(self): num_points, num_samples = _validate_input(self.EmptyDeeplyNested, None,", "should include at least one nan since the distribution #", "typical data to be plotted by the boxplot function. 
self.ValidTypicalBoxData", "0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) def test_plot_bar_data_bad_error_bar_type(self): fig, ax = plt.subplots()", "either case, the line at index 8 should have a", "self.assertTrue(result is None) def test_plot_scatter_data(self): fig, ax = plt.subplots() result", "(0.0, 1.0)) def test_set_axes_options_x_values_as_tick_labels(self): fig, ax = plt.subplots() _set_axes_options(ax, \"Plot", "'sem') self.assertEqual(result[0].__class__.__name__, \"Rectangle\") self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].get_width(), 0.5) self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0,", "ax = plt.subplots() _create_legend(ax, ['b', 'r'], ['dist1', 'dist2'], 'colors') self.assertEqual(len(ax.get_legend().get_texts()),", "self.Empty = [] # Test nested empty data list. self.EmptyNested", "'blue']) # Wrong number of colors. fig, ax = plt.subplots()", "list (for bar/scatter plots). self.EmptyDeeplyNested = [[[]]] # Test invalid", "self.assertEqual(_get_distribution_markers('colors', None, 5), ['b', 'g', 'r', 'c', 'm']) def test_get_distribution_markers_empty_marker_list(self):", "= _calc_data_point_locations(3, [0.001, 0.2543, 0.87]) np.testing.assert_allclose(locs, np.array([1, 1.58296893, 3])) def", "3, 2]], [[4, 7, 10, 33, 32, 6, 7, 8]]]", "\"x-axis label\", \"y-axis label\", \"Test\") def test_grouped_distributions_box(self): fig = grouped_distributions('box',", "def test_create_legend_invalid_input(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _create_legend(ax, ['^',", "ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError): _color_box_plot(ax, box_plot,", "'c']) def test_get_distribution_markers_insufficient_markers(self): self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'colors', None, 10), ['b', 'g',", "(1, 1, 0.9)]) def test_is_single_matplotlib_color(self): self.assertTrue(_is_single_matplotlib_color('w')) self.assertTrue(_is_single_matplotlib_color('white')) self.assertTrue(_is_single_matplotlib_color([1, 1, 1]))", "1.5, 'sem') self.assertEqual(result[0].__class__.__name__, \"Rectangle\") self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].get_width(), 0.5) self.assertAlmostEqual(result[0].get_facecolor(), (1.0,", "= _plot_scatter_data(ax, [1, 2, 3], '^', 0.77, 1, 1.5, 'stdv')", "def test_validate_x_values_invalid_x_tick_labels(self): with npt.assert_raises(ValueError): _validate_x_values(None, [\"T0\"], len(self.ValidSingleSampleData)) def test_validate_x_values_nonnumber_x_values(self): with", "[\"Infants\", \"Children\", \"Teens\"], ['^'], \"x-axis label\", \"y-axis label\", \"Test\") npt.assert_warns(RuntimeWarning,", "9, 10, 11], [9.0, 4, 1, 1]], [[4, 33, 32,", "1.0)) self.assertTrue(np.isnan(ax.patches[0].xy[0][1])) self.assertFalse(np.isnan(ax.patches[1].xy[0][1])) self.assertTrue(np.isnan(ax.patches[2].xy[0][1])) def test_boxplots_invalid_input(self): # Non-numeric entries in", "least one nan since the distribution # is empty, and", "4) np.testing.assert_allclose(ax.get_xticks(), [1.075, 1.975, 3.775, 4.075]) def test_grouped_distributions_error(self): with npt.assert_raises(ValueError):", "plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') _set_figure_size(fig, 3,", "0.33, 55, 1.5, 'stdv') self.assertEqual(result.__class__.__name__, \"dict\") self.assertEqual(len(result['boxes']), 1) 
self.assertEqual(len(result['medians']), 1)", "'stdv') self.assertEqual(result.get_sizes(), 20) def test_plot_scatter_data_empty(self): fig, ax = plt.subplots() result", "def test_set_axes_options_x_values_as_tick_labels(self): fig, ax = plt.subplots() _set_axes_options(ax, \"Plot Title\", \"x-axis", "_calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size,", "'foo', 3]]) # Number of colors doesn't match number of", "= _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'stdv')", "5, 6]] def tearDown(self): # We get a warning from", "Distributed under the terms of the Modified BSD License. #", "_validate_input(self.Null, None, None, None) def test_validate_input_empty(self): with npt.assert_raises(ValueError): _validate_input(self.Empty, None,", "def test_set_axes_options_invalid_x_tick_labels_orientation(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _set_axes_options(ax, \"Plot", "8]]] # Test valid data with three samples and four", "'g', 'r', 'c', 'm', 'y', 'w', 'b', 'g', 'r']) self.assertEqual(npt.assert_warns(RuntimeWarning,", "main import numpy as np import numpy.testing as npt import", "2, 3], 'red', 0.5, 3.75, 1.5, 'stdv') self.assertEqual(result[0].__class__.__name__, \"Rectangle\") self.assertEqual(len(result),", "['dist1', 'dist2'], 'symbols') with npt.assert_raises(ValueError): _create_legend(ax, ['^', '<', '>'], ['dist1',", "label\", x_tick_labels=[\"T0\", \"T1\", \"T2\"], y_min=0, y_max=1) self.assertEqual(ax.get_title(), \"Plot Title\") self.assertEqual(ax.get_ylabel(),", "label\", \"Test\") def test_grouped_distributions_negative_distribution_width(self): args = ('box', self.ValidTypicalData, [1, 4,", "_set_figure_size, fig, 3, 3) npt.assert_array_equal(fig.get_size_inches(), (3, 3)) if __name__ ==", "in first distribution should *not* have nan for its y", "# Non-numeric entries in distribution. with npt.assert_raises(ValueError): boxplots([[1, 'foo', 3]])", "= plt.subplots() result = _plot_scatter_data(ax, [1, 2, 3], '^', 0.77,", "[0, 0, 7, 8, -3, 44], 'blue', 0.33, 55, 1.5,", "to 1..4. locs = _calc_data_point_locations(4, [3, 4, 10, 12]) np.testing.assert_allclose(locs,", "None, None) def test_validate_input_empty_deeply_nested(self): num_points, num_samples = _validate_input(self.EmptyDeeplyNested, None, None,", "plt.subplots() result = _plot_box_data(ax, [0, 0, 7, 8, -3, 44],", "= plt.subplots() with npt.assert_raises(ValueError): _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2'],", "'red', 0.5, 3.75, 1.5, 'sem') self.assertEqual(result[0].__class__.__name__, \"Rectangle\") self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].get_width(),", "= plt.subplots() with npt.assert_raises(ValueError): _plot_bar_data(ax, [1, 2, 3], 'red', 0.5,", "[], 'red', 0.5, 3.75, 1.5, 'sem') self.assertTrue(result is None) def", "\"Children\", \"Teens\"], ['b', 'r', 'g'], \"x-axis label\", \"y-axis label\", \"Test\")", "(for bar/scatter plots). self.EmptyDeeplyNested = [[[]]] # Test invalid number", "# Some colors are None. 
fig, ax = plt.subplots() box_plot", "[1, 1, 2]], [[2, 2, 2, 2], [3, 9, 8],", "32, 6, 7, 8]]] # Test valid data with three", "1.5, 'stdv') self.assertEqual(result[0].__class__.__name__, \"Rectangle\") self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].get_width(), 0.5) self.assertAlmostEqual(result[0].get_facecolor(), (1.0,", "fig.get_size_inches() _set_figure_size(fig) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def test_set_figure_size_invalid(self): fig, ax = plt.subplots()", "self.assertEqual(ax.get_title(), \"Test\") self.assertEqual(ax.get_xlabel(), \"x-axis label\") self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(len(ax.get_xticklabels()), 4)", "boxplot function. self.ValidTypicalBoxData = [[3.4, 10, 11.67, 12.0, 2, 2,", "self.assertEqual(ax.get_xticklabels()[2].get_text(), '800') def test_set_axes_options_bad_ylim(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError):", "npt import matplotlib.pyplot as plt from skbio.draw import boxplots, grouped_distributions", "def test_boxplots(self): fig = boxplots(self.ValidTypicalBoxData, [1, 4, 10], [\"Data 1\",", "ax = plt.subplots() result = _plot_bar_data(ax, [1, 2, 3], 'red',", "(0.9, 0.9)))) def test_set_figure_size(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo',", "distributions are empty. fig = boxplots([[], [], []], [1, 4,", "software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function from", "[1, 4, 10], [\"Data 1\", \"Data 2\", \"Data 3\"], \"Test\",", "self.assertTrue(result is None) fig, ax = plt.subplots() result = _plot_bar_data(ax,", "patch colors should match what we specified self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0,", "self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(ax.get_xticklabels()[0].get_text(), \"T0\") self.assertEqual(ax.get_xticklabels()[1].get_text(), \"T1\") self.assertEqual(ax.get_ylim(), (0.0, 1.0))", "test_set_axes_options_x_values_as_tick_labels(self): fig, ax = plt.subplots() _set_axes_options(ax, \"Plot Title\", \"x-axis label\",", "<gh_stars>0 # ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team.", "empty data list. self.EmptyNested = [[]] # Test nested empty", "# The full license is in the file COPYING.txt, distributed", "3.8125, 4.1125]) def test_grouped_distributions_insufficient_colors(self): args = ('bar', self.ValidTypicalData, [1, 4,", "55, 1.5, 'stdv') self.assertEqual(result.__class__.__name__, \"dict\") self.assertEqual(len(result['boxes']), 1) self.assertEqual(len(result['medians']), 1) self.assertEqual(len(result['whiskers']),", "have nans since it is hidden. 
# boxplots in mpl", "self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def test_set_figure_size_long_labels(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo',", "self.assertTrue(_is_single_matplotlib_color('white')) self.assertTrue(_is_single_matplotlib_color([1, 1, 1])) self.assertTrue(_is_single_matplotlib_color([1, 1, 1, 1])) self.assertTrue(_is_single_matplotlib_color((1, 1,", "= [[]] # Test nested empty data list (for bar/scatter", "'g', 'y'], \"x-axis label\", \"y-axis label\", \"Test\") def test_grouped_distributions_negative_distribution_width(self): args", "def test_set_axes_options_ylim(self): fig, ax = plt.subplots() _set_axes_options(ax, \"Plot Title\", \"x-axis", "3\"], \"Test\", \"x-axis label\", \"y-axis label\") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(),", "if we don't clean up our figures. plt.close('all') def test_validate_input_null(self):", "self.assertTrue(np.array_equal(fig.get_size_inches(), (3, 4))) def test_set_figure_size_defaults(self): fig, ax = plt.subplots() _set_axes_options(ax,", "self.ValidTypicalBoxData = [[3.4, 10, 11.67, 12.0, 2, 2, 99.99], [2.3,", "fig = grouped_distributions('bar', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\",", "11], [\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['b', 'g',", "ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo'", "list. self.Empty = [] # Test nested empty data list.", "None) def test_validate_input_empty_deeply_nested(self): num_points, num_samples = _validate_input(self.EmptyDeeplyNested, None, None, None)", "fig, ax = plt.subplots() _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2',", "# 7. in either case, the line at index 8", "and thus hidden for patch in ax.patches: self.assertTrue(np.isnan(patch.xy[0][1])) fig =", "5, 6, 7, 8], [2, 3, 2]], [[4, 7, 10,", "\"Teens\"], ['b', 'r'], \"x-axis label\", \"y-axis label\", \"Test\") npt.assert_warns(RuntimeWarning, grouped_distributions,", "plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, [None, None, None]) def test_color_box_plot_invalid_input(self): # Invalid", "1, 1))) self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0, 1.0))) self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0))) self.assertTrue(_is_single_matplotlib_color((2.0,", "in data list (for bar/scatter plots). self.InvalidNumSamples = [[[1, 2,", "None) def test_calc_data_point_locations_invalid_x_values(self): with npt.assert_raises(ValueError): _calc_data_point_locations(3, [1, 10.5]) def test_calc_data_point_locations_default_spacing(self):", "'dist2'], 'colors') self.assertEqual(len(ax.get_legend().get_texts()), 2) fig, ax = plt.subplots() _create_legend(ax, ['^',", "'stdv') self.assertTrue(result is None) fig, ax = plt.subplots() result =", "(1.0, 0.7529411764705882, 0.796078431372549, 1.0)) self.assertTrue(np.isnan(patch.xy[0][1])) # Coloring works with some", "'>'], ['dist1', 'dist2', 'dist3'], 'symbols') self.assertEqual(len(ax.get_legend().get_texts()), 3) def test_create_legend_invalid_input(self): fig,", "5, 6]], box_colors=['blue', 'red']) # Invalid legend. with npt.assert_raises(ValueError): boxplots([[1,", "Scaling down from 3..12 to 1..4. 
locs = _calc_data_point_locations(4, [3,", "match what we specified self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0)) self.assertEqual(ax.patches[1].get_facecolor(),", "0.0, 1.0)) self.assertTrue(np.isnan(ax.patches[0].xy[0][1])) self.assertFalse(np.isnan(ax.patches[1].xy[0][1])) self.assertTrue(np.isnan(ax.patches[2].xy[0][1])) def test_boxplots_invalid_input(self): # Non-numeric entries", "= plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') orig_fig_size", "[[4, 7, 8], [8, 9, 10, 11], [9.0, 4, 1,", "[], [4, 5, 6]], [1, 4, 10], [\"Data 1\", \"Data", "[[[]]] # Test invalid number of samples in data list", "'symbols', ['^', '>', '<'], 5), ['^', '>', '<', '^', '>'])", "= plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') _set_figure_size(fig,", "'42') self.assertEqual(ax.get_xticklabels()[1].get_text(), '45') self.assertEqual(ax.get_xticklabels()[2].get_text(), '800') def test_set_axes_options_bad_ylim(self): fig, ax =", "import TestCase, main import numpy as np import numpy.testing as", "import ( _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data,", "'g', 'r', 'c', 'm']) def test_get_distribution_markers_empty_marker_list(self): self.assertEqual(_get_distribution_markers('colors', None, 4), ['b',", "patch.get_facecolor(), (1.0, 0.7529411764705882, 0.796078431372549, 1.0)) self.assertTrue(np.isnan(patch.xy[0][1])) # Coloring works with", "self.assertEqual(_validate_input(self.ValidTypicalData, [1, 3, 4, 8], [\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\",", "orig_fig_size)) def test_set_figure_size_long_labels(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo',", "_validate_x_values([\"foo\", 2, 3], None, len(self.ValidSingleSampleData)) def test_validate_x_values_valid_x_values(self): _validate_x_values([1, 2.0, 3],", "\"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"]), (4, 3)) def test_validate_x_values_invalid_x_values(self): with", "test_plot_bar_data_bad_error_bar_type(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _plot_bar_data(ax, [1, 2,", "1, 4, 3.33333333])) # Scaling up from 0.001..0.87 to 1..3.", "def test_plot_scatter_data_empty(self): fig, ax = plt.subplots() result = _plot_scatter_data(ax, [],", "800]) self.assertEqual(ax.get_title(), \"Plot Title\") self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(ax.get_xticklabels()[0].get_text(), '42') self.assertEqual(ax.get_xticklabels()[1].get_text(),", "4])) # Sorted order shouldn't affect scaling. locs = _calc_data_point_locations(4,", "2, 3], 'red', 0.5, 3.75, 1.5, 'var') def test_plot_bar_data_empty(self): fig,", "[\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['^'], \"x-axis label\",", "4, 10])) def test_boxplots_empty_distributions(self): fig = boxplots([[1, 2, 3], [],", "empty. fig = boxplots([[], [], []], [1, 4, 10], [\"Data", "3], '^', 0.77, 1, 1.5, 'stdv') self.assertEqual(result.get_sizes(), 20) def test_plot_scatter_data_empty(self):", "skbio.draw._distributions import ( _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data,", "self.EmptyDeeplyNested = [[[]]] # Test invalid number of samples in", "empty distributions. 
fig = boxplots([[], [1, 2, 3.5], []], box_colors=['blue',", "_create_legend(ax, ['b', 'r'], ['dist1', 'dist2'], 'colors') self.assertEqual(len(ax.get_legend().get_texts()), 2) fig, ax", "ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) # patch colors should match", "def test_plot_bar_data_empty(self): fig, ax = plt.subplots() result = _plot_bar_data(ax, [],", "between the two versions. # see: # https://github.com/pydata/pandas/issues/8382#issuecomment-56840974 # https://github.com/matplotlib/matplotlib/issues/3544", "self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) lines", "versions. # see: # https://github.com/pydata/pandas/issues/8382#issuecomment-56840974 # https://github.com/matplotlib/matplotlib/issues/3544 self.assertTrue(len(result['fliers']) == 1", "8]], [[4, 7, 8], [8, 9, 10, 11], [9.0, 4,", "_calc_data_point_locations(3, [1, 10.5]) def test_calc_data_point_locations_default_spacing(self): locs = _calc_data_point_locations(4) np.testing.assert_allclose(locs, [1,", "[1, 4, 10])) # second distribution (empty) should have nans", "self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0))) self.assertTrue(_is_single_matplotlib_color((2.0, 1, 1.0))) self.assertFalse(_is_single_matplotlib_color(['w', 'r'])) self.assertFalse(_is_single_matplotlib_color(['w'])) self.assertFalse(_is_single_matplotlib_color(('w',)))", "4], [\"T0\", \"T1\", \"T2\"], len(self.ValidSingleSampleData)) def test_validate_x_values_invalid_x_tick_labels(self): with npt.assert_raises(ValueError): _validate_x_values(None,", "ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, ['blue', 'w',", "def test_get_distribution_markers_zero_markers(self): self.assertEqual(_get_distribution_markers('symbols', None, 0), []) self.assertEqual(_get_distribution_markers('symbols', ['^'], 0), [])", "np.testing.assert_allclose(locs, np.array([1.33333333, 1, 4, 3.33333333])) # Scaling up from 0.001..0.87", "to be plotted by the boxplot function. self.ValidTypicalBoxData = [[3.4,", "# mpl < 1.4.0 creates two Line2D instances, mpl 1.4.0", "test_validate_x_values_valid_x_values(self): _validate_x_values([1, 2.0, 3], None, 3) def test_get_distribution_markers_null_marker_list(self): self.assertEqual(_get_distribution_markers('colors', None,", "the Modified BSD License. # # The full license is", "ax.get_lines() self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) # line in first distribution should *not* have", "colors doesn't match number of distributions. with npt.assert_raises(ValueError): boxplots([[1, 2,", "distributions. fig = boxplots([[], [1, 2, 3.5], []], box_colors=['blue', 'red',", "11], [\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], [], \"x-axis", "\"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['b', 'g', 'y'], \"x-axis label\",", "# Scaling up from 0.001..0.87 to 1..3. 
locs = _calc_data_point_locations(3,", "'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') orig_fig_size = fig.get_size_inches() _set_figure_size(fig,", "1), (0.9, 0.9)))) def test_set_figure_size(self): fig, ax = plt.subplots() _set_axes_options(ax,", "2\", \"Data 3\"], \"Test\", \"x-axis label\", \"y-axis label\") ax =", "self.assertEqual(ax.get_xticklabels()[0].get_text(), \"T0\") self.assertEqual(ax.get_xticklabels()[1].get_text(), \"T1\") self.assertEqual(ax.get_ylim(), (0.0, 1.0)) def test_set_axes_options_x_values_as_tick_labels(self): fig,", "locs = _calc_data_point_locations(4, [4, 3, 12, 10]) np.testing.assert_allclose(locs, np.array([1.33333333, 1,", "\"Plot Title\") self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(ax.get_xticklabels()[0].get_text(), \"T0\") self.assertEqual(ax.get_xticklabels()[1].get_text(), \"T1\") def", "box_colors=['blue', 'red']) # Invalid legend. with npt.assert_raises(ValueError): boxplots([[1, 2, 3]],", "empty data list. self.Empty = [] # Test nested empty", "_plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'sem') self.assertTrue(result is None)", "_set_axes_options(ax, \"Plot Title\", \"x-axis label\", \"y-axis label\", x_tick_labels=[\"T0\", \"T1\"], x_tick_labels_orientation='brofist')", "\"Data 3\"], \"Test\", \"x-axis label\", \"y-axis label\") ax = fig.get_axes()[0]", "# # The full license is in the file COPYING.txt,", "has # 7. in either case, the line at index", "5]], [[4, 5, 6, 7, 8]], [[4, 7, 10, 33,", "x_values=[42, 45, 800]) self.assertEqual(ax.get_title(), \"Plot Title\") self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(ax.get_xticklabels()[0].get_text(),", "'c', 'm', 'y', 'w', 'b', 'g', 'r']) self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'symbols',", "8 should have a nan for its y # value", "case, the line at index 8 should have a nan", "test_validate_input_all_valid_input(self): self.assertEqual(_validate_input(self.ValidTypicalData, [1, 3, 4, 8], [\"T0\", \"T1\", \"T2\", \"T3\"],", "fig, ax = plt.subplots() with npt.assert_raises(ValueError): _create_legend(ax, ['^', '<', '>'],", "self.assertTrue(_is_single_matplotlib_color((1, 1, 1, 1))) self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0, 1.0))) self.assertTrue(_is_single_matplotlib_color((1.0, 1,", "def test_set_axes_options_bad_ylim(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _set_axes_options(ax, \"Plot", "plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError): _color_box_plot(ax, box_plot, ['red', 'foobarbaz',", "args = ('scatter', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\",", "6, 8], [5, 4, 8, 13], [1, 1, 2]], [[2,", "[\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"]), (4, 3)) def", "[\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['b', 'r', 'g'],", "distribution_width=-42) def test_boxplots(self): fig = boxplots(self.ValidTypicalBoxData, [1, 4, 10], [\"Data", "self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) # line in first distribution should *not* have nan", "['dist1', 'dist2'], 'colors') self.assertEqual(len(ax.get_legend().get_texts()), 2) fig, ax = plt.subplots() _create_legend(ax,", "ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, ['blue', None,", "_create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'], 'symbols') 
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------

from __future__ import absolute_import, division, print_function

from unittest import TestCase, main

import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt

from skbio.draw import boxplots, grouped_distributions
from skbio.draw._distributions import (
    _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot,
    _create_legend, _get_distribution_markers, _is_single_matplotlib_color,
    _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options,
    _set_figure_size, _validate_input, _validate_x_values)


class DistributionsTests(TestCase):
    def setUp(self):
        # Test null data list.
        self.Null = None

        # Test empty data list.
        self.Empty = []

        # Test nested empty data list.
        self.EmptyNested = [[]]

        # Test nested empty data list (for bar/scatter plots).
        self.EmptyDeeplyNested = [[[]]]

        # Test invalid number of samples in data list (for bar/scatter
        # plots).
        self.InvalidNumSamples = [[[1, 2, 3, 4, 5]],
                                  [[4, 5, 6, 7, 8], [2, 3, 2]],
                                  [[4, 7, 10, 33, 32, 6, 7, 8]]]

        # Test valid data with one sample (for bar/scatter plots).
        self.ValidSingleSampleData = [[[1, 2, 3, 4, 5]],
                                      [[4, 5, 6, 7, 8]],
                                      [[4, 7, 10, 33, 32, 6, 7, 8]]]

        # Test valid data with three samples and four data points
        # (for bar/scatter plots).
        self.ValidTypicalData = [[[1.0, 2, 3.5, 5], [2, 3, 5, 6], [2, 3, 8]],
                                 [[4, 7, 8], [8, 9, 10, 11], [9.0, 4, 1, 1]],
                                 [[4, 33, 32, 6, 8], [5, 4, 8, 13],
                                  [1, 1, 2]],
                                 [[2, 2, 2, 2], [3, 9, 8],
                                  [2, 1, 6, 7, 4, 5]]]

        # Test typical data to be plotted by the boxplot function.
        self.ValidTypicalBoxData = [[3.4, 10, 11.67, 12.0, 2, 2, 99.99],
                                    [2.3, 4, 5, 88, 9, 10, 11, 1, 0, 3, -8],
                                    [2, 9, 7, 5, 6]]

    def tearDown(self):
        # We get a warning from mpl if we don't clean up our figures.
        plt.close('all')

    def test_validate_input_null(self):
        with npt.assert_raises(ValueError):
            _validate_input(self.Null, None, None, None)

    def test_validate_input_empty(self):
        with npt.assert_raises(ValueError):
            _validate_input(self.Empty, None, None, None)

    def test_validate_input_empty_nested(self):
        with npt.assert_raises(ValueError):
            _validate_input(self.EmptyNested, None, None, None)

    def test_validate_input_empty_deeply_nested(self):
        num_points, num_samples = _validate_input(self.EmptyDeeplyNested,
                                                  None, None, None)
        self.assertEqual(num_points, 1)
        self.assertEqual(num_samples, 1)

    def test_validate_input_empty_point(self):
        with npt.assert_raises(ValueError):
            _validate_input([[[1, 2, 3], [4, 5]], []], None, None, None)

    def test_validate_input_invalid_num_samples(self):
        with npt.assert_raises(ValueError):
            _validate_input(self.InvalidNumSamples, None, None, None)

    def test_validate_input_invalid_data_point_names(self):
        with npt.assert_raises(ValueError):
            _validate_input(self.ValidSingleSampleData, None, ["T0", "T1"],
                            None)

    def test_validate_input_invalid_sample_names(self):
        with npt.assert_raises(ValueError):
            _validate_input(self.ValidSingleSampleData, None, None,
                            ["Men", "Women"])

    def test_validate_input_all_valid_input(self):
        self.assertEqual(_validate_input(self.ValidTypicalData, [1, 3, 4, 8],
                                         ["T0", "T1", "T2", "T3"],
                                         ["Infants", "Children", "Teens"]),
                         (4, 3))

    def test_validate_x_values_invalid_x_values(self):
        with npt.assert_raises(ValueError):
            _validate_x_values([1, 2, 3, 4], ["T0", "T1", "T2"],
                               len(self.ValidSingleSampleData))

    def test_validate_x_values_invalid_x_tick_labels(self):
        with npt.assert_raises(ValueError):
            _validate_x_values(None, ["T0"], len(self.ValidSingleSampleData))

    def test_validate_x_values_nonnumber_x_values(self):
        with npt.assert_raises(ValueError):
            _validate_x_values(["foo", 2, 3], None,
                               len(self.ValidSingleSampleData))

    def test_validate_x_values_valid_x_values(self):
        _validate_x_values([1, 2.0, 3], None, 3)

    def test_get_distribution_markers_null_marker_list(self):
        self.assertEqual(_get_distribution_markers('colors', None, 5),
                         ['b', 'g', 'r', 'c', 'm'])

    def test_get_distribution_markers_empty_marker_list(self):
        self.assertEqual(_get_distribution_markers('colors', None, 4),
                         ['b', 'g', 'r', 'c'])

    def test_get_distribution_markers_insufficient_markers(self):
        self.assertEqual(npt.assert_warns(RuntimeWarning,
                                          _get_distribution_markers,
                                          'colors', None, 10),
                         ['b', 'g', 'r', 'c', 'm', 'y', 'w', 'b', 'g', 'r'])
        self.assertEqual(npt.assert_warns(RuntimeWarning,
                                          _get_distribution_markers,
                                          'symbols', ['^', '>', '<'], 5),
                         ['^', '>', '<', '^', '>'])

    def test_get_distribution_markers_bad_marker_type(self):
        with npt.assert_raises(ValueError):
            _get_distribution_markers('shapes', [], 3)

    def test_get_distribution_markers_zero_markers(self):
        self.assertEqual(_get_distribution_markers('symbols', None, 0), [])
        self.assertEqual(_get_distribution_markers('symbols', ['^'], 0), [])

    def test_get_distribution_markers_negative_num_markers(self):
        with npt.assert_raises(ValueError):
            _get_distribution_markers('symbols', [], -1)

    def test_plot_bar_data(self):
        fig, ax = plt.subplots()
        result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'stdv')
        self.assertEqual(result[0].__class__.__name__, "Rectangle")
        self.assertEqual(len(result), 1)
        self.assertAlmostEqual(result[0].get_width(), 0.5)
        self.assertAlmostEqual(result[0].get_facecolor(),
                               (1.0, 0.0, 0.0, 1.0))
        self.assertAlmostEqual(result[0].get_height(), 2.0)

        fig, ax = plt.subplots()
        result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'sem')
        self.assertEqual(result[0].__class__.__name__, "Rectangle")
        self.assertEqual(len(result), 1)
        self.assertAlmostEqual(result[0].get_width(), 0.5)
        self.assertAlmostEqual(result[0].get_facecolor(),
                               (1.0, 0.0, 0.0, 1.0))
        self.assertAlmostEqual(result[0].get_height(), 2.0)

    def test_plot_bar_data_bad_error_bar_type(self):
        fig, ax = plt.subplots()
        with npt.assert_raises(ValueError):
            _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'var')

    def test_plot_bar_data_empty(self):
        fig, ax = plt.subplots()
        result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'stdv')
        self.assertTrue(result is None)

        fig, ax = plt.subplots()
        result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'sem')
        self.assertTrue(result is None)

    def test_plot_scatter_data(self):
        fig, ax = plt.subplots()
        result = _plot_scatter_data(ax, [1, 2, 3], '^', 0.77, 1, 1.5, 'stdv')
        self.assertEqual(result.get_sizes(), 20)

    def test_plot_scatter_data_empty(self):
        fig, ax = plt.subplots()
        result = _plot_scatter_data(ax, [], '^', 0.77, 1, 1.5, 'stdv')
        self.assertTrue(result is None)

    def test_plot_box_data(self):
        fig, ax = plt.subplots()
        result = _plot_box_data(ax, [0, 0, 7, 8, -3, 44], 'blue', 0.33, 55,
                                1.5, 'stdv')
        self.assertEqual(result.__class__.__name__, "dict")
        self.assertEqual(len(result['boxes']), 1)
        self.assertEqual(len(result['medians']), 1)
        self.assertEqual(len(result['whiskers']), 2)
        # mpl < 1.4.0 creates two Line2D instances, mpl 1.4.0 creates one,
        # though the resulting plot looks identical between the two versions.
        # see:
        # https://github.com/pydata/pandas/issues/8382#issuecomment-56840974
        # https://github.com/matplotlib/matplotlib/issues/3544
        self.assertTrue(len(result['fliers']) == 1 or
                        len(result['fliers']) == 2)
        self.assertEqual(len(result['caps']), 2)

    def test_plot_box_data_empty(self):
        fig, ax = plt.subplots()
        result = _plot_box_data(ax, [], 'blue', 0.33, 55, 1.5, 'stdv')
        self.assertTrue(result is None)

    def test_calc_data_point_locations_invalid_x_values(self):
        with npt.assert_raises(ValueError):
            _calc_data_point_locations(3, [1, 10.5])

    def test_calc_data_point_locations_default_spacing(self):
        locs = _calc_data_point_locations(4)
        np.testing.assert_allclose(locs, [1, 2, 3, 4])

    def test_calc_data_point_locations_custom_spacing(self):
        # Scaling down from 3..12 to 1..4.
        locs = _calc_data_point_locations(4, [3, 4, 10, 12])
        np.testing.assert_allclose(locs,
                                   np.array([1, 1.33333333, 3.33333333, 4]))

        # Sorted order shouldn't affect scaling.
        locs = _calc_data_point_locations(4, [4, 3, 12, 10])
        np.testing.assert_allclose(locs,
                                   np.array([1.33333333, 1, 4, 3.33333333]))

        # Scaling up from 0.001..0.87 to 1..3.
        locs = _calc_data_point_locations(3, [0.001, 0.2543, 0.87])
        np.testing.assert_allclose(locs, np.array([1, 1.58296893, 3]))

    def test_calc_data_point_ticks(self):
        ticks = _calc_data_point_ticks(np.array([1, 5, 9, 11]), 1, 0.5,
                                       False)
        np.testing.assert_allclose(ticks, [1.25, 5.25, 9.25, 11.25])

        ticks = _calc_data_point_ticks(np.array([0]), 3, 0.5, False)
        np.testing.assert_allclose(ticks, [0.75])

    def test_set_axes_options(self):
        fig, ax = plt.subplots()
        _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
                          x_tick_labels=["T0", "T1"])
        self.assertEqual(ax.get_title(), "Plot Title")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0")
        self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1")

    def test_set_axes_options_ylim(self):
        fig, ax = plt.subplots()
        _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
                          x_tick_labels=["T0", "T1", "T2"], y_min=0, y_max=1)
        self.assertEqual(ax.get_title(), "Plot Title")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0")
        self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1")
        self.assertEqual(ax.get_ylim(), (0.0, 1.0))

    def test_set_axes_options_x_values_as_tick_labels(self):
        fig, ax = plt.subplots()
        _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
                          x_values=[42, 45, 800])
        self.assertEqual(ax.get_title(), "Plot Title")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(ax.get_xticklabels()[0].get_text(), '42')
        self.assertEqual(ax.get_xticklabels()[1].get_text(), '45')
        self.assertEqual(ax.get_xticklabels()[2].get_text(), '800')

    def test_set_axes_options_bad_ylim(self):
        fig, ax = plt.subplots()
        with npt.assert_raises(ValueError):
            _set_axes_options(ax, "Plot Title", "x-axis label",
                              "y-axis label",
                              x_tick_labels=["T0", "T1", "T2"],
                              y_min='car', y_max=30)

    def test_set_axes_options_invalid_x_tick_labels_orientation(self):
        fig, ax = plt.subplots()
        with npt.assert_raises(ValueError):
            _set_axes_options(ax, "Plot Title", "x-axis label",
                              "y-axis label", x_tick_labels=["T0", "T1"],
                              x_tick_labels_orientation='brofist')

    def test_create_legend(self):
        fig, ax = plt.subplots()
        _create_legend(ax, ['b', 'r'], ['dist1', 'dist2'], 'colors')
        self.assertEqual(len(ax.get_legend().get_texts()), 2)

        fig, ax = plt.subplots()
        _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'],
                       'symbols')
        self.assertEqual(len(ax.get_legend().get_texts()), 3)

    def test_create_legend_invalid_input(self):
        fig, ax = plt.subplots()
        with npt.assert_raises(ValueError):
            _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2'],
                           'symbols')
        with npt.assert_raises(ValueError):
            _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'],
                           'foo')

    def test_grouped_distributions_bar(self):
        fig = grouped_distributions('bar', self.ValidTypicalData,
                                    [1, 4, 10, 11],
                                    ["T0", "T1", "T2", "T3"],
                                    ["Infants", "Children", "Teens"],
                                    ['b', 'r', 'g'],
                                    "x-axis label", "y-axis label", "Test")
        ax = fig.get_axes()[0]
        self.assertEqual(ax.get_title(), "Test")
        self.assertEqual(ax.get_xlabel(), "x-axis label")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(len(ax.get_xticklabels()), 4)
        np.testing.assert_allclose(ax.get_xticks(),
                                   [1.1125, 2.0125, 3.8125, 4.1125])

    def test_grouped_distributions_insufficient_colors(self):
        args = ('bar', self.ValidTypicalData, [1, 4, 10, 11],
                ["T0", "T1", "T2", "T3"],
                ["Infants", "Children", "Teens"],
                ['b', 'r'], "x-axis label", "y-axis label", "Test")
        npt.assert_warns(RuntimeWarning, grouped_distributions, *args)

    def test_grouped_distributions_scatter(self):
        fig = grouped_distributions('scatter', self.ValidTypicalData,
                                    [1, 4, 10, 11],
                                    ["T0", "T1", "T2", "T3"],
                                    ["Infants", "Children", "Teens"],
                                    ['^', '>', '<'],
                                    "x-axis label", "y-axis label", "Test")
        ax = fig.get_axes()[0]
        self.assertEqual(ax.get_title(), "Test")
        self.assertEqual(ax.get_xlabel(), "x-axis label")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(len(ax.get_xticklabels()), 4)
        np.testing.assert_allclose(ax.get_xticks(),
                                   [1.075, 1.975, 3.775, 4.075])

    def test_grouped_distributions_insufficient_symbols(self):
        args = ('scatter', self.ValidTypicalData, [1, 4, 10, 11],
                ["T0", "T1", "T2", "T3"],
                ["Infants", "Children", "Teens"],
                ['^'], "x-axis label", "y-axis label", "Test")
        npt.assert_warns(RuntimeWarning, grouped_distributions, *args)

    def test_grouped_distributions_empty_marker_list(self):
        grouped_distributions('scatter', self.ValidTypicalData,
                              [1, 4, 10, 11],
                              ["T0", "T1", "T2", "T3"],
                              ["Infants", "Children", "Teens"],
                              [], "x-axis label", "y-axis label", "Test")

    def test_grouped_distributions_box(self):
        fig = grouped_distributions('box', self.ValidTypicalData,
                                    [1, 4, 10, 11],
                                    ["T0", "T1", "T2", "T3"],
                                    ["Infants", "Children", "Teens"],
                                    ['b', 'g', 'y'],
                                    "x-axis label", "y-axis label", "Test")
        ax = fig.get_axes()[0]
        self.assertEqual(ax.get_title(), "Test")
        self.assertEqual(ax.get_xlabel(), "x-axis label")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(len(ax.get_xticklabels()), 4)
        np.testing.assert_allclose(ax.get_xticks(),
                                   [1.075, 1.975, 3.775, 4.075])

    def test_grouped_distributions_error(self):
        with npt.assert_raises(ValueError):
            grouped_distributions('pie', self.ValidTypicalData,
                                  [1, 4, 10, 11],
                                  ["T0", "T1", "T2", "T3"],
                                  ["Infants", "Children", "Teens"],
                                  ['b', 'g', 'y'],
                                  "x-axis label", "y-axis label", "Test")

    def test_grouped_distributions_negative_distribution_width(self):
        args = ('box', self.ValidTypicalData, [1, 4, 10, 11],
                ["T0", "T1", "T2", "T3"],
                ["Infants", "Children", "Teens"],
                ['b', 'g', 'y'], "x-axis label", "y-axis label", "Test")
        with self.assertRaises(ValueError):
            grouped_distributions(*args, distribution_width=0)
        with self.assertRaises(ValueError):
            grouped_distributions(*args, distribution_width=-42)

    def test_boxplots(self):
        fig = boxplots(self.ValidTypicalBoxData, [1, 4, 10],
                       ["Data 1", "Data 2", "Data 3"], "Test",
                       "x-axis label", "y-axis label",
                       legend=(('blue', 'red'), ('foo', 'bar')))
        ax = fig.get_axes()[0]
        self.assertEqual(ax.get_title(), "Test")
        self.assertEqual(ax.get_xlabel(), "x-axis label")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(len(ax.get_xticklabels()), 3)
        self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))

    def test_boxplots_empty_distributions(self):
        fig = boxplots([[1, 2, 3], [], [4, 5, 6]], [1, 4, 10],
                       ["Data 1", "Data 2", "Data 3"], "Test",
                       "x-axis label", "y-axis label")
        ax = fig.get_axes()[0]
        self.assertEqual(ax.get_title(), "Test")
        self.assertEqual(ax.get_xlabel(), "x-axis label")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(len(ax.get_xticklabels()), 3)
        self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))
        # second distribution (empty) should have nans since it is hidden.
        # boxplots in mpl < 1.4.0 have 8 lines per boxplot, while mpl 1.4.0
        # has 7. in either case, the line at index 8 should have a nan for
        # its y value
        lines = ax.get_lines()
        self.assertTrue(np.isnan(lines[8].get_xydata()[0][1]))
        # line in first distribution should *not* have nan for its y value
        self.assertFalse(np.isnan(lines[0].get_xydata()[0][1]))

        # All distributions are empty.
        fig = boxplots([[], [], []], [1, 4, 10],
                       ["Data 1", "Data 2", "Data 3"], "Test",
                       "x-axis label", "y-axis label")
        ax = fig.get_axes()[0]
        self.assertEqual(ax.get_title(), "Test")
        self.assertEqual(ax.get_xlabel(), "x-axis label")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(len(ax.get_xticklabels()), 3)
        self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))
        lines = ax.get_lines()
        self.assertTrue(np.isnan(lines[0].get_xydata()[0][1]))
        self.assertTrue(np.isnan(lines[8].get_xydata()[0][1]))
        self.assertTrue(np.isnan(lines[16].get_xydata()[0][1]))

    def test_boxplots_box_colors(self):
        # Coloring works with all empty distributions.
        fig = boxplots([[], [], []], box_colors=['blue', 'red', 'yellow'])
        ax = fig.get_axes()[0]
        self.assertEqual(len(ax.get_xticklabels()), 3)
        # patch colors should match what we specified
        self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0))
        self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
        self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0))
        # patch location should include at least one nan since the
        # distribution is empty, and thus hidden
        for patch in ax.patches:
            self.assertTrue(np.isnan(patch.xy[0][1]))

        fig = boxplots([[], [], []], box_colors='pink')
        ax = fig.get_axes()[0]
        self.assertEqual(len(ax.get_xticklabels()), 3)
        for patch in ax.patches:
            npt.assert_almost_equal(
                patch.get_facecolor(),
                (1.0, 0.7529411764705882, 0.796078431372549, 1.0))
            self.assertTrue(np.isnan(patch.xy[0][1]))

        # Coloring works with some empty distributions.
        fig = boxplots([[], [1, 2, 3.5], []],
                       box_colors=['blue', 'red', 'yellow'])
        ax = fig.get_axes()[0]
        self.assertEqual(len(ax.get_xticklabels()), 3)
        self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0))
        self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
        self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0))
        self.assertTrue(np.isnan(ax.patches[0].xy[0][1]))
        self.assertFalse(np.isnan(ax.patches[1].xy[0][1]))
        self.assertTrue(np.isnan(ax.patches[2].xy[0][1]))

    def test_boxplots_invalid_input(self):
        # Non-numeric entries in distribution.
        with npt.assert_raises(ValueError):
            boxplots([[1, 'foo', 3]])

        # Number of colors doesn't match number of distributions.
        with npt.assert_raises(ValueError):
            boxplots([[1, 2, 3], [], [4, 5, 6]], box_colors=['blue', 'red'])

        # Invalid legend.
        with npt.assert_raises(ValueError):
            boxplots([[1, 2, 3]], legend=('foo', 'bar', 'baz'))

    def test_color_box_plot(self):
        fig, ax = plt.subplots()
        box_plot = plt.boxplot(self.ValidTypicalBoxData)
        _color_box_plot(ax, box_plot, ['blue', 'w', (1, 1, 0.9)])

        # Some colors are None.
        fig, ax = plt.subplots()
        box_plot = plt.boxplot(self.ValidTypicalBoxData)
        _color_box_plot(ax, box_plot, ['blue', None, (1, 1, 0.9)])

        # All colors are None.
        fig, ax = plt.subplots()
        box_plot = plt.boxplot(self.ValidTypicalBoxData)
        _color_box_plot(ax, box_plot, [None, None, None])

    def test_color_box_plot_invalid_input(self):
        # Invalid color.
        fig, ax = plt.subplots()
        box_plot = plt.boxplot(self.ValidTypicalBoxData)
        with npt.assert_raises(ValueError):
            _color_box_plot(ax, box_plot, ['red', 'foobarbaz', 'blue'])

        # Wrong number of colors.
        fig, ax = plt.subplots()
        box_plot = plt.boxplot(self.ValidTypicalBoxData)
        with npt.assert_raises(ValueError):
            _color_box_plot(ax, box_plot, ['blue', (1, 1, 0.9)])

    def test_is_single_matplotlib_color(self):
        self.assertTrue(_is_single_matplotlib_color('w'))
        self.assertTrue(_is_single_matplotlib_color('white'))
        self.assertTrue(_is_single_matplotlib_color([1, 1, 1]))
        self.assertTrue(_is_single_matplotlib_color([1, 1, 1, 1]))
        self.assertTrue(_is_single_matplotlib_color((1, 1, 1)))
        self.assertTrue(_is_single_matplotlib_color((1, 1, 1, 1)))
        self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0)))
        self.assertTrue(_is_single_matplotlib_color((2.0, 1, 1.0)))
        self.assertFalse(_is_single_matplotlib_color(['w', 'r']))
        self.assertFalse(_is_single_matplotlib_color(['w']))
        self.assertFalse(_is_single_matplotlib_color(('w',)))
        self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),)))
        self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),
                                                      (0.9, 0.9))))

    def test_set_figure_size(self):
        fig, ax = plt.subplots()
        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
                          x_tick_labels=['foofoofoo', 'barbarbar'],
                          x_tick_labels_orientation='vertical')
        _set_figure_size(fig, 3, 4)
        self.assertTrue(np.array_equal(fig.get_size_inches(), (3, 4)))

    def test_set_figure_size_defaults(self):
        fig, ax = plt.subplots()
        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
                          x_tick_labels=['foofoofoo', 'barbarbar'],
                          x_tick_labels_orientation='vertical')
        orig_fig_size = fig.get_size_inches()
        _set_figure_size(fig)
        self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size))

    def test_set_figure_size_invalid(self):
        fig, ax = plt.subplots()
        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
                          x_tick_labels=['foofoofoo', 'barbarbar'],
                          x_tick_labels_orientation='vertical')
        orig_fig_size = fig.get_size_inches()
        _set_figure_size(fig, -1, 0)
        self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size))

    def test_set_figure_size_long_labels(self):
        fig, ax = plt.subplots()
        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
                          x_tick_labels=['foofoofooooooooooooooooooooooooo'
                                         'oooooooooooooooooooooooooooooooo'
                                         'oooooooooooooooooooooooooooooooo'
                                         'oooo', 'barbarbar'],
                          x_tick_labels_orientation='vertical')
        npt.assert_warns(RuntimeWarning, _set_figure_size, fig, 3, 3)
        npt.assert_array_equal(fig.get_size_inches(), (3, 3))


if __name__ == '__main__':
    main()
self.Empty = [] # Test nested empty data", "x_tick_labels=[\"T0\", \"T1\"], x_tick_labels_orientation='brofist') def test_create_legend(self): fig, ax = plt.subplots() _create_legend(ax,", "0.0, 0.0, 1.0)) self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0)) # patch", "test_plot_scatter_data_empty(self): fig, ax = plt.subplots() result = _plot_scatter_data(ax, [], '^',", "fig = boxplots([[], [], []], [1, 4, 10], [\"Data 1\",", "with npt.assert_raises(ValueError): boxplots([[1, 2, 3]], legend=('foo', 'bar', 'baz')) def test_color_box_plot(self):", "# Test invalid number of samples in data list (for", "0.7529411764705882, 0.796078431372549, 1.0)) self.assertTrue(np.isnan(patch.xy[0][1])) # Coloring works with some empty", "= plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError): _color_box_plot(ax, box_plot, ['red',", "55, 1.5, 'stdv') self.assertTrue(result is None) def test_calc_data_point_locations_invalid_x_values(self): with npt.assert_raises(ValueError):", "'r', 'c', 'm']) def test_get_distribution_markers_empty_marker_list(self): self.assertEqual(_get_distribution_markers('colors', None, 4), ['b', 'g',", "box_plot, ['red', 'foobarbaz', 'blue']) # Wrong number of colors. fig,", "0.9)]) # All colors are None. fig, ax = plt.subplots()", "terms of the Modified BSD License. # # The full", "ax.get_lines() self.assertTrue(np.isnan(lines[0].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[16].get_xydata()[0][1])) def test_boxplots_box_colors(self): # Coloring works with", "self.assertEqual(len(result['medians']), 1) self.assertEqual(len(result['whiskers']), 2) # mpl < 1.4.0 creates two", "distribution. with npt.assert_raises(ValueError): boxplots([[1, 'foo', 3]]) # Number of colors", "_get_distribution_markers('symbols', [], -1) def test_plot_bar_data(self): fig, ax = plt.subplots() result", "'blue', 0.33, 55, 1.5, 'stdv') self.assertEqual(result.__class__.__name__, \"dict\") self.assertEqual(len(result['boxes']), 1) self.assertEqual(len(result['medians']),", "print_function from unittest import TestCase, main import numpy as np", "test_grouped_distributions_scatter(self): fig = grouped_distributions('scatter', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\",", "label\", \"y-axis label\", \"Test\") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), \"Test\") self.assertEqual(ax.get_xlabel(),", "result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'sem') self.assertTrue(result", "locs = _calc_data_point_locations(4) np.testing.assert_allclose(locs, [1, 2, 3, 4]) def test_calc_data_point_locations_custom_spacing(self):", "32, 6, 7, 8]]] # Test typical data to be", "sample (for bar/scatter plots). 
self.ValidSingleSampleData = [[[1, 2, 3, 4,", "\"x-axis label\") self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.1125, 2.0125,", "valid data with three samples and four data points #", "[\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['b', 'g', 'y'],", "self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.075, 1.975, 3.775, 4.075]) def test_grouped_distributions_error(self): with", "plt.subplots() with npt.assert_raises(ValueError): _set_axes_options(ax, \"Plot Title\", \"x-axis label\", \"y-axis label\",", "self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].get_width(), 0.5) self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(),", "1..4. locs = _calc_data_point_locations(4, [3, 4, 10, 12]) np.testing.assert_allclose(locs, np.array([1,", "self.assertRaises(ValueError): grouped_distributions(*args, distribution_width=0) with self.assertRaises(ValueError): grouped_distributions(*args, distribution_width=-42) def test_boxplots(self): fig", "1, 2]], [[2, 2, 2, 2], [3, 9, 8], [2,", "1.33333333, 3.33333333, 4])) # Sorted order shouldn't affect scaling. locs", "\"y-axis label\", \"Test\") with self.assertRaises(ValueError): grouped_distributions(*args, distribution_width=0) with self.assertRaises(ValueError): grouped_distributions(*args,", "plt from skbio.draw import boxplots, grouped_distributions from skbio.draw._distributions import (", "data points # (for bar/scatter plots). self.ValidTypicalData = [[[1.0, 2,", "_plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values) class DistributionsTests(TestCase): def", "_plot_box_data(ax, [0, 0, 7, 8, -3, 44], 'blue', 0.33, 55,", "fig = boxplots([[], [], []], box_colors=['blue', 'red', 'yellow']) ax =", "colors should match what we specified self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0,", "self.assertAlmostEqual(result[0].get_width(), 0.5) self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) def", "in the file COPYING.txt, distributed with this software. # ----------------------------------------------------------------------------", "'foo') def test_grouped_distributions_bar(self): fig = grouped_distributions('bar', self.ValidTypicalData, [1, 4, 10,", "fig.get_axes()[0] self.assertEqual(ax.get_title(), \"Test\") self.assertEqual(ax.get_xlabel(), \"x-axis label\") self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(len(ax.get_xticklabels()),", "of the Modified BSD License. 
# # The full license", "_set_figure_size(fig) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def test_set_figure_size_invalid(self): fig, ax = plt.subplots() _set_axes_options(ax,", "'r', 'c', 'm', 'y', 'w', 'b', 'g', 'r']) self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers,", "__future__ import absolute_import, division, print_function from unittest import TestCase, main", "nan for its y value self.assertFalse(np.isnan(lines[0].get_xydata()[0][1])) # All distributions are", "= fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0)) self.assertEqual(ax.patches[1].get_facecolor(),", "4, 8, 13], [1, 1, 2]], [[2, 2, 2, 2],", "None) def test_validate_input_empty_nested(self): with npt.assert_raises(ValueError): _validate_input(self.EmptyNested, None, None, None) def", "_color_box_plot(ax, box_plot, [None, None, None]) def test_color_box_plot_invalid_input(self): # Invalid color.", "plt.subplots() _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'], 'symbols') self.assertEqual(len(ax.get_legend().get_texts()),", "distribution should *not* have nan for its y value self.assertFalse(np.isnan(lines[0].get_xydata()[0][1]))", "\"Teens\"], ['^'], \"x-axis label\", \"y-axis label\", \"Test\") npt.assert_warns(RuntimeWarning, grouped_distributions, *args)", "5, 9, 11]), 1, 0.5, False) np.testing.assert_allclose(ticks, [1.25, 5.25, 9.25,", "label\") self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) # second distribution", "number of colors. fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData)", "9, 11]), 1, 0.5, False) np.testing.assert_allclose(ticks, [1.25, 5.25, 9.25, 11.25])", "3.75, 1.5, 'var') def test_plot_bar_data_empty(self): fig, ax = plt.subplots() result", "# Test empty data list. self.Empty = [] # Test", "self.assertEqual(ax.get_xlabel(), \"x-axis label\") self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1,", "self.EmptyNested = [[]] # Test nested empty data list (for", "(4, 3)) def test_validate_x_values_invalid_x_values(self): with npt.assert_raises(ValueError): _validate_x_values([1, 2, 3, 4],", "box_plot = plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError): _color_box_plot(ax, box_plot, ['red', 'foobarbaz', 'blue'])", "3..12 to 1..4. locs = _calc_data_point_locations(4, [3, 4, 10, 12])", "len(result['fliers']) == 2) self.assertEqual(len(result['caps']), 2) def test_plot_box_data_empty(self): fig, ax =", "['b', 'g', 'y'], \"x-axis label\", \"y-axis label\", \"Test\") def test_grouped_distributions_negative_distribution_width(self):", "y_max=30) def test_set_axes_options_invalid_x_tick_labels_orientation(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _set_axes_options(ax,", "self.assertFalse(np.isnan(lines[0].get_xydata()[0][1])) # All distributions are empty. 
fig = boxplots([[], [],", "creates two Line2D instances, mpl 1.4.0 creates one, # though", "self.assertTrue(np.isnan(lines[0].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[16].get_xydata()[0][1])) def test_boxplots_box_colors(self): # Coloring works with all", "7, 4, 5]]] # Test valid data with one sample", "7, 8], [8, 9, 10, 11], [9.0, 4, 1, 1]],", "-3, 44], 'blue', 0.33, 55, 1.5, 'stdv') self.assertEqual(result.__class__.__name__, \"dict\") self.assertEqual(len(result['boxes']),", "4) np.testing.assert_allclose(ax.get_xticks(), [1.075, 1.975, 3.775, 4.075]) def test_grouped_distributions_insufficient_symbols(self): args =", "\"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], [], \"x-axis label\", \"y-axis", "to 1..3. locs = _calc_data_point_locations(3, [0.001, 0.2543, 0.87]) np.testing.assert_allclose(locs, np.array([1,", "one, # though the resulting plot looks identical between the", "_set_axes_options(ax, \"Plot Title\", \"x-axis label\", \"y-axis label\", x_tick_labels=[\"T0\", \"T1\"]) self.assertEqual(ax.get_title(),", "ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical')", "11], [\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['^', '>',", "label\") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), \"Test\") self.assertEqual(ax.get_xlabel(), \"x-axis label\") self.assertEqual(ax.get_ylabel(),", "[1.075, 1.975, 3.775, 4.075]) def test_grouped_distributions_error(self): with npt.assert_raises(ValueError): grouped_distributions('pie', self.ValidTypicalData,", "plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError): _color_box_plot(ax, box_plot, ['blue', (1,", "in distribution. with npt.assert_raises(ValueError): boxplots([[1, 'foo', 3]]) # Number of", "label\", \"Test\") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), \"Test\") self.assertEqual(ax.get_xlabel(), \"x-axis label\")", "fig, ax = plt.subplots() with npt.assert_raises(ValueError): _plot_bar_data(ax, [1, 2, 3],", "affect scaling. 
locs = _calc_data_point_locations(4, [4, 3, 12, 10]) np.testing.assert_allclose(locs,", "for patch in ax.patches: self.assertTrue(np.isnan(patch.xy[0][1])) fig = boxplots([[], [], []],", "boxplots(self.ValidTypicalBoxData, [1, 4, 10], [\"Data 1\", \"Data 2\", \"Data 3\"],", "= plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError): _color_box_plot(ax, box_plot, ['blue', (1, 1, 0.9)])", "plt.subplots() result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75,", "'g', 'r', 'c']) def test_get_distribution_markers_insufficient_markers(self): self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'colors', None, 10),", "a warning from mpl if we don't clean up our", "'g', 'y'], \"x-axis label\", \"y-axis label\", \"Test\") ax = fig.get_axes()[0]", "fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0)) self.assertEqual(ax.patches[1].get_facecolor(), (1.0,", "7, 5, 6]] def tearDown(self): # We get a warning", "distribution # is empty, and thus hidden for patch in", "4, 5]], [[4, 5, 6, 7, 8]], [[4, 7, 10,", "mpl < 1.4.0 creates two Line2D instances, mpl 1.4.0 creates", "= grouped_distributions('bar', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\", \"T2\",", "# is empty, and thus hidden for patch in ax.patches:", "None, None) self.assertEqual(num_points, 1) self.assertEqual(num_samples, 1) def test_validate_input_empty_point(self): with npt.assert_raises(ValueError):", "3, 5, 6], [2, 3, 8]], [[4, 7, 8], [8,", "import absolute_import, division, print_function from unittest import TestCase, main import", "11, 1, 0, 3, -8], [2, 9, 7, 5, 6]]", "3.75, 1.5, 'sem') self.assertTrue(result is None) def test_plot_scatter_data(self): fig, ax", "10), ['b', 'g', 'r', 'c', 'm', 'y', 'w', 'b', 'g',", "0.5, 3.75, 1.5, 'stdv') self.assertEqual(result[0].__class__.__name__, \"Rectangle\") self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].get_width(), 0.5)", "the distribution # is empty, and thus hidden for patch", "with npt.assert_raises(ValueError): _validate_x_values([\"foo\", 2, 3], None, len(self.ValidSingleSampleData)) def test_validate_x_values_valid_x_values(self): _validate_x_values([1,", "self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.075, 1.975, 3.775, 4.075])", "[1.075, 1.975, 3.775, 4.075]) def test_grouped_distributions_insufficient_symbols(self): args = ('scatter', self.ValidTypicalData,", "[1.1125, 2.0125, 3.8125, 4.1125]) def test_grouped_distributions_insufficient_colors(self): args = ('bar', self.ValidTypicalData,", "label\", x_tick_labels=[\"T0\", \"T1\"], x_tick_labels_orientation='brofist') def test_create_legend(self): fig, ax = plt.subplots()", "\"Children\", \"Teens\"], [], \"x-axis label\", \"y-axis label\", \"Test\") def test_grouped_distributions_box(self):", "boxplots([[], [], []], box_colors=['blue', 'red', 'yellow']) ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()),", "test_grouped_distributions_insufficient_colors(self): args = ('bar', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\",", "for its y value self.assertFalse(np.isnan(lines[0].get_xydata()[0][1])) # All distributions are empty.", "Title\", \"x-axis label\", \"y-axis label\", x_tick_labels=[\"T0\", \"T1\"]) self.assertEqual(ax.get_title(), \"Plot Title\")", "x_tick_labels=[\"T0\", \"T1\", \"T2\"], y_min='car', y_max=30) def 
test_set_axes_options_invalid_x_tick_labels_orientation(self): fig, ax =", "'x_foo', 'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') _set_figure_size(fig, 3, 4) self.assertTrue(np.array_equal(fig.get_size_inches(), (3,", "= plt.subplots() result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5,", "\"x-axis label\", \"y-axis label\", \"Test\") npt.assert_warns(RuntimeWarning, grouped_distributions, *args) def test_grouped_distributions_empty_marker_list(self):", "though the resulting plot looks identical between the two versions.", "with one sample (for bar/scatter plots). self.ValidSingleSampleData = [[[1, 2,", "boxplots in mpl < 1.4.0 have 8 lines per boxplot,", "0.0, 1.0)) self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0)) self.assertTrue(np.isnan(ax.patches[0].xy[0][1])) self.assertFalse(np.isnan(ax.patches[1].xy[0][1])) self.assertTrue(np.isnan(ax.patches[2].xy[0][1]))", "1, 0.9)]) # All colors are None. fig, ax =", "x_tick_labels_orientation='vertical') orig_fig_size = fig.get_size_inches() _set_figure_size(fig, -1, 0) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def", "plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, [None, None, None]) def", "All distributions are empty. fig = boxplots([[], [], []], [1,", "'r'])) self.assertFalse(_is_single_matplotlib_color(['w'])) self.assertFalse(_is_single_matplotlib_color(('w',))) self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),))) self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1), (0.9,", "20) def test_plot_scatter_data_empty(self): fig, ax = plt.subplots() result = _plot_scatter_data(ax,", "at index 8 should have a nan for its y", "== 1 or len(result['fliers']) == 2) self.assertEqual(len(result['caps']), 2) def test_plot_box_data_empty(self):", "= ('bar', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\", \"T2\",", "8], [2, 1, 6, 7, 4, 5]]] # Test valid", "label\", \"y-axis label\", x_tick_labels=[\"T0\", \"T1\"]) self.assertEqual(ax.get_title(), \"Plot Title\") self.assertEqual(ax.get_ylabel(), \"y-axis", "= plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError): _color_box_plot(ax, box_plot, ['blue',", "= _calc_data_point_ticks(np.array([1, 5, 9, 11]), 1, 0.5, False) np.testing.assert_allclose(ticks, [1.25,", "_validate_input, _validate_x_values) class DistributionsTests(TestCase): def setUp(self): # Test null data", "def test_validate_x_values_valid_x_values(self): _validate_x_values([1, 2.0, 3], None, 3) def test_get_distribution_markers_null_marker_list(self): self.assertEqual(_get_distribution_markers('colors',", "self.assertEqual(ax.get_xticklabels()[1].get_text(), \"T1\") def test_set_axes_options_ylim(self): fig, ax = plt.subplots() _set_axes_options(ax, \"Plot", "plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError): _color_box_plot(ax, box_plot, ['red', 'foobarbaz', 'blue']) # Wrong", "6]], box_colors=['blue', 'red']) # Invalid legend. with npt.assert_raises(ValueError): boxplots([[1, 2,", "is empty, and thus hidden for patch in ax.patches: self.assertTrue(np.isnan(patch.xy[0][1]))", "3, 4, 5]], [[4, 5, 6, 7, 8], [2, 3,", "def test_validate_input_empty_point(self): with npt.assert_raises(ValueError): _validate_input([[[1, 2, 3], [4, 5]], []],", "locs = _calc_data_point_locations(4, [3, 4, 10, 12]) np.testing.assert_allclose(locs, np.array([1, 1.33333333,", "(for bar/scatter plots). 
self.ValidSingleSampleData = [[[1, 2, 3, 4, 5]],", "8], [2, 3, 2]], [[4, 7, 10, 33, 32, 6,", "None) def test_validate_input_invalid_num_samples(self): with npt.assert_raises(ValueError): _validate_input(self.InvalidNumSamples, None, None, None) def", "2) self.assertEqual(len(result['caps']), 2) def test_plot_box_data_empty(self): fig, ax = plt.subplots() result", "valid data with one sample (for bar/scatter plots). self.ValidSingleSampleData =", "def test_plot_box_data_empty(self): fig, ax = plt.subplots() result = _plot_box_data(ax, [],", "test_color_box_plot(self): fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot,", "'stdv') self.assertTrue(result is None) def test_calc_data_point_locations_invalid_x_values(self): with npt.assert_raises(ValueError): _calc_data_point_locations(3, [1,", "4, 3.33333333])) # Scaling up from 0.001..0.87 to 1..3. locs", "orig_fig_size = fig.get_size_inches() _set_figure_size(fig) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def test_set_figure_size_invalid(self): fig, ax", "(empty) should have nans since it is hidden. # boxplots", "self.assertFalse(_is_single_matplotlib_color(['w'])) self.assertFalse(_is_single_matplotlib_color(('w',))) self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),))) self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1), (0.9, 0.9))))", "box_colors='pink') ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) for patch in ax.patches:", "0.2543, 0.87]) np.testing.assert_allclose(locs, np.array([1, 1.58296893, 3])) def test_calc_data_point_ticks(self): ticks =", "'>', '<', '^', '>']) def test_get_distribution_markers_bad_marker_type(self): with npt.assert_raises(ValueError): _get_distribution_markers('shapes', [],", "1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) fig, ax = plt.subplots() result = _plot_bar_data(ax,", "npt.assert_raises(ValueError): _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2'], 'symbols') with npt.assert_raises(ValueError):", "\"y-axis label\") self.assertEqual(ax.get_xticklabels()[0].get_text(), \"T0\") self.assertEqual(ax.get_xticklabels()[1].get_text(), \"T1\") def test_set_axes_options_ylim(self): fig, ax", "distribution (empty) should have nans since it is hidden. #", "self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) def test_plot_bar_data_bad_error_bar_type(self): fig,", "_get_distribution_markers, 'colors', None, 10), ['b', 'g', 'r', 'c', 'm', 'y',", "_set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo' 'oooo', 'barbarbar'], x_tick_labels_orientation='vertical')", "'<'], 5), ['^', '>', '<', '^', '>']) def test_get_distribution_markers_bad_marker_type(self): with", "value lines = ax.get_lines() self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) # line in first distribution", "# All colors are None. 
fig, ax = plt.subplots() box_plot", "1, 1, 1])) self.assertTrue(_is_single_matplotlib_color((1, 1, 1))) self.assertTrue(_is_single_matplotlib_color((1, 1, 1, 1)))", "self.assertTrue(result is None) def test_plot_box_data(self): fig, ax = plt.subplots() result", "= boxplots([[], [], []], box_colors='pink') ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3)", "self.assertEqual(len(ax.get_legend().get_texts()), 3) def test_create_legend_invalid_input(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError):", "specified self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0)) self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0,", "_validate_input(self.InvalidNumSamples, None, None, None) def test_validate_input_invalid_data_point_names(self): with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None,", "10, 11], [\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], [],", "def test_plot_bar_data(self): fig, ax = plt.subplots() result = _plot_bar_data(ax, [1,", "_color_box_plot(ax, box_plot, ['blue', None, (1, 1, 0.9)]) # All colors", "[], -1) def test_plot_bar_data(self): fig, ax = plt.subplots() result =", "6]], [1, 4, 10], [\"Data 1\", \"Data 2\", \"Data 3\"],", "\"T1\") def test_set_axes_options_ylim(self): fig, ax = plt.subplots() _set_axes_options(ax, \"Plot Title\",", "8 lines per boxplot, while mpl 1.4.0 has # 7.", "3, 0.5, False) np.testing.assert_allclose(ticks, [0.75]) def test_set_axes_options(self): fig, ax =", "self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) fig, ax =", "# All distributions are empty. fig = boxplots([[], [], []],", "1))) self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0, 1.0))) self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0))) self.assertTrue(_is_single_matplotlib_color((2.0, 1,", "= grouped_distributions('scatter', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\", \"T2\",", "should match what we specified self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0))", "npt.assert_raises(ValueError): _validate_x_values(None, [\"T0\"], len(self.ValidSingleSampleData)) def test_validate_x_values_nonnumber_x_values(self): with npt.assert_raises(ValueError): _validate_x_values([\"foo\", 2,", "data list. self.EmptyNested = [[]] # Test nested empty data", "0, 7, 8, -3, 44], 'blue', 0.33, 55, 1.5, 'stdv')", "1, 1]], [[4, 33, 32, 6, 8], [5, 4, 8,", "self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0))", "1])) self.assertTrue(_is_single_matplotlib_color((1, 1, 1))) self.assertTrue(_is_single_matplotlib_color((1, 1, 1, 1))) self.assertTrue(_is_single_matplotlib_color((1.0, 1.0,", "\"x-axis label\", \"y-axis label\", x_tick_labels=[\"T0\", \"T1\", \"T2\"], y_min=0, y_max=1) self.assertEqual(ax.get_title(),", "DistributionsTests(TestCase): def setUp(self): # Test null data list. 
self.Null =", "\"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['^', '>', '<'], \"x-axis label\",", "fig = boxplots([[1, 2, 3], [], [4, 5, 6]], [1,", "\"x-axis label\", \"y-axis label\", x_tick_labels=[\"T0\", \"T1\", \"T2\"], y_min='car', y_max=30) def", "'x_foo', 'y_foo', x_tick_labels=['foofoofooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo' 'oooo', 'barbarbar'], x_tick_labels_orientation='vertical') npt.assert_warns(RuntimeWarning, _set_figure_size,", "0.5, 3.75, 1.5, 'stdv') self.assertTrue(result is None) fig, ax =", "_validate_x_values([1, 2, 3, 4], [\"T0\", \"T1\", \"T2\"], len(self.ValidSingleSampleData)) def test_validate_x_values_invalid_x_tick_labels(self):", "bar/scatter plots). self.ValidTypicalData = [[[1.0, 2, 3.5, 5], [2, 3,", "'oooo', 'barbarbar'], x_tick_labels_orientation='vertical') npt.assert_warns(RuntimeWarning, _set_figure_size, fig, 3, 3) npt.assert_array_equal(fig.get_size_inches(), (3,", "[0.001, 0.2543, 0.87]) np.testing.assert_allclose(locs, np.array([1, 1.58296893, 3])) def test_calc_data_point_ticks(self): ticks", "3], [], [4, 5, 6]], box_colors=['blue', 'red']) # Invalid legend.", "test_validate_x_values_invalid_x_tick_labels(self): with npt.assert_raises(ValueError): _validate_x_values(None, [\"T0\"], len(self.ValidSingleSampleData)) def test_validate_x_values_nonnumber_x_values(self): with npt.assert_raises(ValueError):", "('foo', 'bar'))) ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), \"Test\") self.assertEqual(ax.get_xlabel(), \"x-axis label\")", "= _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'sem') self.assertTrue(result is", "Test typical data to be plotted by the boxplot function.", "self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0, 1.0))) self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0))) self.assertTrue(_is_single_matplotlib_color((2.0, 1, 1.0)))", "_set_axes_options(ax, \"Plot Title\", \"x-axis label\", \"y-axis label\", x_values=[42, 45, 800])", "label\", \"Test\") with self.assertRaises(ValueError): grouped_distributions(*args, distribution_width=0) with self.assertRaises(ValueError): grouped_distributions(*args, distribution_width=-42)", "_plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'stdv') self.assertTrue(result is None)", "[4, 5]], []], None, None, None) def test_validate_input_invalid_num_samples(self): with npt.assert_raises(ValueError):", "legend. with npt.assert_raises(ValueError): boxplots([[1, 2, 3]], legend=('foo', 'bar', 'baz')) def", "4) self.assertTrue(np.array_equal(fig.get_size_inches(), (3, 4))) def test_set_figure_size_defaults(self): fig, ax = plt.subplots()", "2.0) def test_plot_bar_data_bad_error_bar_type(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _plot_bar_data(ax,", "of samples in data list (for bar/scatter plots). 
self.InvalidNumSamples =", "= plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo' 'oooo',", "0.5, 3.75, 1.5, 'sem') self.assertEqual(result[0].__class__.__name__, \"Rectangle\") self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].get_width(), 0.5)", "plt.subplots() result = _plot_scatter_data(ax, [1, 2, 3], '^', 0.77, 1,", "'oooooooooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo' 'oooo', 'barbarbar'], x_tick_labels_orientation='vertical') npt.assert_warns(RuntimeWarning, _set_figure_size, fig, 3, 3)", "def test_calc_data_point_ticks(self): ticks = _calc_data_point_ticks(np.array([1, 5, 9, 11]), 1, 0.5,", "\"x-axis label\", \"y-axis label\", \"Test\") with self.assertRaises(ValueError): grouped_distributions(*args, distribution_width=0) with", "self.assertEqual(num_points, 1) self.assertEqual(num_samples, 1) def test_validate_input_empty_point(self): with npt.assert_raises(ValueError): _validate_input([[[1, 2,", "with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, None, [\"Men\", \"Women\"]) def test_validate_input_all_valid_input(self): self.assertEqual(_validate_input(self.ValidTypicalData,", "ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) for patch in ax.patches: npt.assert_almost_equal(", "[\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], [], \"x-axis label\",", "three samples and four data points # (for bar/scatter plots).", "5, 6], [2, 3, 8]], [[4, 7, 8], [8, 9,", "and four data points # (for bar/scatter plots). self.ValidTypicalData =", "include at least one nan since the distribution # is", "0, 3, -8], [2, 9, 7, 5, 6]] def tearDown(self):", "locs = _calc_data_point_locations(3, [0.001, 0.2543, 0.87]) np.testing.assert_allclose(locs, np.array([1, 1.58296893, 3]))", "label\", \"Test\") def test_grouped_distributions_box(self): fig = grouped_distributions('box', self.ValidTypicalData, [1, 4,", "plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, ['blue', None, (1, 1, 0.9)]) # All", "[\"Infants\", \"Children\", \"Teens\"]), (4, 3)) def test_validate_x_values_invalid_x_values(self): with npt.assert_raises(ValueError): _validate_x_values([1,", "0.5, 3.75, 1.5, 'var') def test_plot_bar_data_empty(self): fig, ax = plt.subplots()", "[1, 2, 3.5], []], box_colors=['blue', 'red', 'yellow']) ax = fig.get_axes()[0]", "'bar'))) ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), \"Test\") self.assertEqual(ax.get_xlabel(), \"x-axis label\") self.assertEqual(ax.get_ylabel(),", "clean up our figures. plt.close('all') def test_validate_input_null(self): with npt.assert_raises(ValueError): _validate_input(self.Null,", "2], [3, 9, 8], [2, 1, 6, 7, 4, 5]]]", "1, 0.9)]) def test_is_single_matplotlib_color(self): self.assertTrue(_is_single_matplotlib_color('w')) self.assertTrue(_is_single_matplotlib_color('white')) self.assertTrue(_is_single_matplotlib_color([1, 1, 1])) self.assertTrue(_is_single_matplotlib_color([1,", "\"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['b', 'g', 'y'], \"x-axis label\", \"y-axis", "fig, ax = plt.subplots() _set_axes_options(ax, \"Plot Title\", \"x-axis label\", \"y-axis", "def test_boxplots_box_colors(self): # Coloring works with all empty distributions. 
fig", "5]], [[4, 5, 6, 7, 8], [2, 3, 2]], [[4,", "\"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['^'], \"x-axis label\", \"y-axis", "_plot_scatter_data(ax, [], '^', 0.77, 1, 1.5, 'stdv') self.assertTrue(result is None)", "3, 8]], [[4, 7, 8], [8, 9, 10, 11], [9.0,", "( _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data,", "4, 5, 88, 9, 10, 11, 1, 0, 3, -8],", "is None) def test_calc_data_point_locations_invalid_x_values(self): with npt.assert_raises(ValueError): _calc_data_point_locations(3, [1, 10.5]) def", "with npt.assert_raises(ValueError): _get_distribution_markers('symbols', [], -1) def test_plot_bar_data(self): fig, ax =", "2) def test_plot_box_data_empty(self): fig, ax = plt.subplots() result = _plot_box_data(ax,", "Title\", \"x-axis label\", \"y-axis label\", x_tick_labels=[\"T0\", \"T1\", \"T2\"], y_min=0, y_max=1)", "mpl < 1.4.0 have 8 lines per boxplot, while mpl", "self.assertEqual(_get_distribution_markers('colors', None, 4), ['b', 'g', 'r', 'c']) def test_get_distribution_markers_insufficient_markers(self): self.assertEqual(npt.assert_warns(RuntimeWarning,", "5]]] # Test valid data with one sample (for bar/scatter", "[2, 1, 6, 7, 4, 5]]] # Test valid data", "\"Teens\"]), (4, 3)) def test_validate_x_values_invalid_x_values(self): with npt.assert_raises(ValueError): _validate_x_values([1, 2, 3,", "2, 3, 4]) def test_calc_data_point_locations_custom_spacing(self): # Scaling down from 3..12", "None, None, [\"Men\", \"Women\"]) def test_validate_input_all_valid_input(self): self.assertEqual(_validate_input(self.ValidTypicalData, [1, 3, 4,", "test_set_figure_size(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo',", "as npt import matplotlib.pyplot as plt from skbio.draw import boxplots,", "6, 7, 4, 5]]] # Test valid data with one", "mpl 1.4.0 creates one, # though the resulting plot looks", "10, 11], [9.0, 4, 1, 1]], [[4, 33, 32, 6,", "[]) def test_get_distribution_markers_negative_num_markers(self): with npt.assert_raises(ValueError): _get_distribution_markers('symbols', [], -1) def test_plot_bar_data(self):", "(0.0, 0.0, 1.0, 1.0)) self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertEqual(ax.patches[2].get_facecolor(),", "1, 1.0))) self.assertTrue(_is_single_matplotlib_color((2.0, 1, 1.0))) self.assertFalse(_is_single_matplotlib_color(['w', 'r'])) self.assertFalse(_is_single_matplotlib_color(['w'])) self.assertFalse(_is_single_matplotlib_color(('w',))) self.assertFalse(_is_single_matplotlib_color(((1.0,", "np.testing.assert_allclose(locs, np.array([1, 1.33333333, 3.33333333, 4])) # Sorted order shouldn't affect", "label\") self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.075, 1.975, 3.775, 4.075]) def test_grouped_distributions_insufficient_symbols(self):", "colors. fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError):", "test_set_figure_size_long_labels(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofooooooooooooooooooooooooo'", "11.25]) ticks = _calc_data_point_ticks(np.array([0]), 3, 0.5, False) np.testing.assert_allclose(ticks, [0.75]) def", "1\", \"Data 2\", \"Data 3\"], \"Test\", \"x-axis label\", \"y-axis label\",", "should have nans since it is hidden. 
# boxplots in", "2, 3.5, 5], [2, 3, 5, 6], [2, 3, 8]],", "None) def test_validate_input_invalid_sample_names(self): with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, None, [\"Men\", \"Women\"])", "= _plot_box_data(ax, [0, 0, 7, 8, -3, 44], 'blue', 0.33,", "full license is in the file COPYING.txt, distributed with this", "None, [\"Men\", \"Women\"]) def test_validate_input_all_valid_input(self): self.assertEqual(_validate_input(self.ValidTypicalData, [1, 3, 4, 8],", "x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') _set_figure_size(fig, 3, 4) self.assertTrue(np.array_equal(fig.get_size_inches(), (3, 4))) def", "def test_set_figure_size_invalid(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',", "7. in either case, the line at index 8 should", "\"Teens\"], ['^', '>', '<'], \"x-axis label\", \"y-axis label\", \"Test\") ax", "result = _plot_box_data(ax, [0, 0, 7, 8, -3, 44], 'blue',", "x_tick_labels_orientation='brofist') def test_create_legend(self): fig, ax = plt.subplots() _create_legend(ax, ['b', 'r'],", "2, 99.99], [2.3, 4, 5, 88, 9, 10, 11, 1,", "fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo'", "[1, 4, 10])) lines = ax.get_lines() self.assertTrue(np.isnan(lines[0].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[16].get_xydata()[0][1])) def", "= plt.subplots() result = _plot_box_data(ax, [0, 0, 7, 8, -3,", "class DistributionsTests(TestCase): def setUp(self): # Test null data list. self.Null", "\"Teens\"], ['b', 'g', 'y'], \"x-axis label\", \"y-axis label\", \"Test\") ax", "np.testing.assert_allclose(locs, [1, 2, 3, 4]) def test_calc_data_point_locations_custom_spacing(self): # Scaling down", "_color_box_plot(ax, box_plot, ['blue', 'w', (1, 1, 0.9)]) # Some colors", "2, 2, 2], [3, 9, 8], [2, 1, 6, 7,", "legend=(('blue', 'red'), ('foo', 'bar'))) ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), \"Test\") self.assertEqual(ax.get_xlabel(),", "Test valid data with one sample (for bar/scatter plots). self.ValidSingleSampleData", "10, 11.67, 12.0, 2, 2, 99.99], [2.3, 4, 5, 88,", "\"y-axis label\") self.assertEqual(ax.get_xticklabels()[0].get_text(), \"T0\") self.assertEqual(ax.get_xticklabels()[1].get_text(), \"T1\") self.assertEqual(ax.get_ylim(), (0.0, 1.0)) def", "def test_color_box_plot_invalid_input(self): # Invalid color. fig, ax = plt.subplots() box_plot", "3, 12, 10]) np.testing.assert_allclose(locs, np.array([1.33333333, 1, 4, 3.33333333])) # Scaling", "one sample (for bar/scatter plots). self.ValidSingleSampleData = [[[1, 2, 3,", "10, 11, 1, 0, 3, -8], [2, 9, 7, 5,", "from 0.001..0.87 to 1..3. 
locs = _calc_data_point_locations(3, [0.001, 0.2543, 0.87])", "1, 1))) self.assertTrue(_is_single_matplotlib_color((1, 1, 1, 1))) self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0, 1.0)))", "\"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['^', '>', '<'], \"x-axis", "ax.patches: npt.assert_almost_equal( patch.get_facecolor(), (1.0, 0.7529411764705882, 0.796078431372549, 1.0)) self.assertTrue(np.isnan(patch.xy[0][1])) # Coloring", "npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, [\"T0\", \"T1\"], None) def test_validate_input_invalid_sample_names(self): with npt.assert_raises(ValueError):", "box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, ['blue', 'w', (1, 1, 0.9)])", "[\"Data 1\", \"Data 2\", \"Data 3\"], \"Test\", \"x-axis label\", \"y-axis", "Title\", \"x-axis label\", \"y-axis label\", x_tick_labels=[\"T0\", \"T1\"], x_tick_labels_orientation='brofist') def test_create_legend(self):", "1.5, 'stdv') self.assertEqual(result.__class__.__name__, \"dict\") self.assertEqual(len(result['boxes']), 1) self.assertEqual(len(result['medians']), 1) self.assertEqual(len(result['whiskers']), 2)", "'red', 0.5, 3.75, 1.5, 'stdv') self.assertEqual(result[0].__class__.__name__, \"Rectangle\") self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].get_width(),", "def test_validate_input_empty(self): with npt.assert_raises(ValueError): _validate_input(self.Empty, None, None, None) def test_validate_input_empty_nested(self):", "self.Null = None # Test empty data list. self.Empty =", "*args) def test_grouped_distributions_empty_marker_list(self): grouped_distributions('scatter', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\",", "0.5, 3.75, 1.5, 'sem') self.assertTrue(result is None) def test_plot_scatter_data(self): fig,", "boxplots([[1, 'foo', 3]]) # Number of colors doesn't match number", "\"T2\"], y_min='car', y_max=30) def test_set_axes_options_invalid_x_tick_labels_orientation(self): fig, ax = plt.subplots() with", "self.ValidSingleSampleData = [[[1, 2, 3, 4, 5]], [[4, 5, 6,", "bar/scatter plots). self.ValidSingleSampleData = [[[1, 2, 3, 4, 5]], [[4,", "ax = plt.subplots() _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'],", "grouped_distributions(*args, distribution_width=-42) def test_boxplots(self): fig = boxplots(self.ValidTypicalBoxData, [1, 4, 10],", "npt.assert_raises(ValueError): _validate_input(self.Empty, None, None, None) def test_validate_input_empty_nested(self): with npt.assert_raises(ValueError): _validate_input(self.EmptyNested,", "some empty distributions. fig = boxplots([[], [1, 2, 3.5], []],", "two versions. # see: # https://github.com/pydata/pandas/issues/8382#issuecomment-56840974 # https://github.com/matplotlib/matplotlib/issues/3544 self.assertTrue(len(result['fliers']) ==", "1]], [[4, 33, 32, 6, 8], [5, 4, 8, 13],", "label\") self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.075, 1.975, 3.775, 4.075]) def test_grouped_distributions_error(self):", "1.4.0 has # 7. in either case, the line at", "test_plot_box_data_empty(self): fig, ax = plt.subplots() result = _plot_box_data(ax, [], 'blue',", "label\") self.assertEqual(ax.get_xticklabels()[0].get_text(), \"T0\") self.assertEqual(ax.get_xticklabels()[1].get_text(), \"T1\") self.assertEqual(ax.get_ylim(), (0.0, 1.0)) def test_set_axes_options_x_values_as_tick_labels(self):", "first distribution should *not* have nan for its y value", "Sorted order shouldn't affect scaling. 
locs = _calc_data_point_locations(4, [4, 3,", "_validate_x_values(None, [\"T0\"], len(self.ValidSingleSampleData)) def test_validate_x_values_nonnumber_x_values(self): with npt.assert_raises(ValueError): _validate_x_values([\"foo\", 2, 3],", "fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) for patch in ax.patches: npt.assert_almost_equal( patch.get_facecolor(), (1.0,", "3.5, 5], [2, 3, 5, 6], [2, 3, 8]], [[4,", "get a warning from mpl if we don't clean up", "0), []) self.assertEqual(_get_distribution_markers('symbols', ['^'], 0), []) def test_get_distribution_markers_negative_num_markers(self): with npt.assert_raises(ValueError):", "nan since the distribution # is empty, and thus hidden", "['^'], \"x-axis label\", \"y-axis label\", \"Test\") npt.assert_warns(RuntimeWarning, grouped_distributions, *args) def", "boxplots([[1, 2, 3], [], [4, 5, 6]], [1, 4, 10],", "plotted by the boxplot function. self.ValidTypicalBoxData = [[3.4, 10, 11.67,", "['b', 'g', 'r', 'c']) def test_get_distribution_markers_insufficient_markers(self): self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'colors', None,", "\"T0\") self.assertEqual(ax.get_xticklabels()[1].get_text(), \"T1\") self.assertEqual(ax.get_ylim(), (0.0, 1.0)) def test_set_axes_options_x_values_as_tick_labels(self): fig, ax", "self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) def test_boxplots_empty_distributions(self): fig =", "self.assertTrue(_is_single_matplotlib_color([1, 1, 1, 1])) self.assertTrue(_is_single_matplotlib_color((1, 1, 1))) self.assertTrue(_is_single_matplotlib_color((1, 1, 1,", "grouped_distributions('box', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\", \"T2\", \"T3\"],", "The full license is in the file COPYING.txt, distributed with", "self.assertTrue(np.isnan(lines[16].get_xydata()[0][1])) def test_boxplots_box_colors(self): # Coloring works with all empty distributions.", "4, 5]], [[4, 5, 6, 7, 8], [2, 3, 2]],", "[[4, 33, 32, 6, 8], [5, 4, 8, 13], [1,", "1.5, 'sem') self.assertTrue(result is None) def test_plot_scatter_data(self): fig, ax =", "[[2, 2, 2, 2], [3, 9, 8], [2, 1, 6,", "12, 10]) np.testing.assert_allclose(locs, np.array([1.33333333, 1, 4, 3.33333333])) # Scaling up", "8]]] # Test typical data to be plotted by the", "Some colors are None. fig, ax = plt.subplots() box_plot =", "value self.assertFalse(np.isnan(lines[0].get_xydata()[0][1])) # All distributions are empty. 
fig = boxplots([[],", "\"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['b', 'r', 'g'], \"x-axis label\", \"y-axis", "0) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def test_set_figure_size_long_labels(self): fig, ax = plt.subplots() _set_axes_options(ax,", "6], [2, 3, 8]], [[4, 7, 8], [8, 9, 10,", "[], 'red', 0.5, 3.75, 1.5, 'stdv') self.assertTrue(result is None) fig,", "['dist1', 'dist2', 'dist3'], 'symbols') self.assertEqual(len(ax.get_legend().get_texts()), 3) def test_create_legend_invalid_input(self): fig, ax", "patch in ax.patches: self.assertTrue(np.isnan(patch.xy[0][1])) fig = boxplots([[], [], []], box_colors='pink')", "def test_plot_scatter_data(self): fig, ax = plt.subplots() result = _plot_scatter_data(ax, [1,", "\"Plot Title\", \"x-axis label\", \"y-axis label\", x_tick_labels=[\"T0\", \"T1\"]) self.assertEqual(ax.get_title(), \"Plot", "1) self.assertAlmostEqual(result[0].get_width(), 0.5) self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0)", "2, 3], [], [4, 5, 6]], [1, 4, 10], [\"Data", "_create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values)", "None) fig, ax = plt.subplots() result = _plot_bar_data(ax, [], 'red',", "None) def test_validate_input_empty(self): with npt.assert_raises(ValueError): _validate_input(self.Empty, None, None, None) def", "3.75, 1.5, 'stdv') self.assertTrue(result is None) fig, ax = plt.subplots()", "label\", \"y-axis label\", legend=(('blue', 'red'), ('foo', 'bar'))) ax = fig.get_axes()[0]", "'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') _set_figure_size(fig, 3, 4) self.assertTrue(np.array_equal(fig.get_size_inches(),", "3]], legend=('foo', 'bar', 'baz')) def test_color_box_plot(self): fig, ax = plt.subplots()", "license is in the file COPYING.txt, distributed with this software.", "npt.assert_raises(ValueError): _validate_input([[[1, 2, 3], [4, 5]], []], None, None, None)", "= _calc_data_point_ticks(np.array([0]), 3, 0.5, False) np.testing.assert_allclose(ticks, [0.75]) def test_set_axes_options(self): fig,", "grouped_distributions, *args) def test_grouped_distributions_scatter(self): fig = grouped_distributions('scatter', self.ValidTypicalData, [1, 4,", "\"Test\") with self.assertRaises(ValueError): grouped_distributions(*args, distribution_width=0) with self.assertRaises(ValueError): grouped_distributions(*args, distribution_width=-42) def", "patch in ax.patches: npt.assert_almost_equal( patch.get_facecolor(), (1.0, 0.7529411764705882, 0.796078431372549, 1.0)) self.assertTrue(np.isnan(patch.xy[0][1]))", "def test_grouped_distributions_empty_marker_list(self): grouped_distributions('scatter', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\",", "plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo' 'oooo', 'barbarbar'],", "[], []], box_colors='pink') ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) for patch", "self.assertEqual(len(ax.get_xticklabels()), 3) for patch in ax.patches: npt.assert_almost_equal( patch.get_facecolor(), (1.0, 0.7529411764705882,", "3.5], []], box_colors=['blue', 'red', 'yellow']) ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3)", "Coloring works with some 
empty distributions. fig = boxplots([[], [1,", "_set_axes_options, _set_figure_size, _validate_input, _validate_x_values) class DistributionsTests(TestCase): def setUp(self): # Test", "2) # mpl < 1.4.0 creates two Line2D instances, mpl", "= [[[1.0, 2, 3.5, 5], [2, 3, 5, 6], [2,", "test_set_axes_options_invalid_x_tick_labels_orientation(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _set_axes_options(ax, \"Plot Title\",", "at least one nan since the distribution # is empty,", "None, 3) def test_get_distribution_markers_null_marker_list(self): self.assertEqual(_get_distribution_markers('colors', None, 5), ['b', 'g', 'r',", "is None) fig, ax = plt.subplots() result = _plot_bar_data(ax, [],", "def test_grouped_distributions_bar(self): fig = grouped_distributions('bar', self.ValidTypicalData, [1, 4, 10, 11],", "self.assertEqual(len(ax.get_xticklabels()), 3) self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0)) self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0,", "11], [9.0, 4, 1, 1]], [[4, 33, 32, 6, 8],", "self.assertEqual(ax.get_xticklabels()[1].get_text(), \"T1\") self.assertEqual(ax.get_ylim(), (0.0, 1.0)) def test_set_axes_options_x_values_as_tick_labels(self): fig, ax =", "absolute_import, division, print_function from unittest import TestCase, main import numpy", "def test_calc_data_point_locations_invalid_x_values(self): with npt.assert_raises(ValueError): _calc_data_point_locations(3, [1, 10.5]) def test_calc_data_point_locations_default_spacing(self): locs", "= fig.get_axes()[0] self.assertEqual(ax.get_title(), \"Test\") self.assertEqual(ax.get_xlabel(), \"x-axis label\") self.assertEqual(ax.get_ylabel(), \"y-axis label\")", "boxplots([[], [], []], box_colors='pink') ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) for", "def test_grouped_distributions_negative_distribution_width(self): args = ('box', self.ValidTypicalData, [1, 4, 10, 11],", "npt.assert_raises(ValueError): _color_box_plot(ax, box_plot, ['red', 'foobarbaz', 'blue']) # Wrong number of", "self.assertRaises(ValueError): grouped_distributions(*args, distribution_width=-42) def test_boxplots(self): fig = boxplots(self.ValidTypicalBoxData, [1, 4,", "= grouped_distributions('box', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\", \"T2\",", "self.assertEqual(ax.get_title(), \"Test\") self.assertEqual(ax.get_xlabel(), \"x-axis label\") self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(len(ax.get_xticklabels()), 3)", "figures. plt.close('all') def test_validate_input_null(self): with npt.assert_raises(ValueError): _validate_input(self.Null, None, None, None)", "1, 6, 7, 4, 5]]] # Test valid data with", "9, 8], [2, 1, 6, 7, 4, 5]]] # Test", "1..3. locs = _calc_data_point_locations(3, [0.001, 0.2543, 0.87]) np.testing.assert_allclose(locs, np.array([1, 1.58296893,", "_get_distribution_markers('shapes', [], 3) def test_get_distribution_markers_zero_markers(self): self.assertEqual(_get_distribution_markers('symbols', None, 0), []) self.assertEqual(_get_distribution_markers('symbols',", "3, 4]) def test_calc_data_point_locations_custom_spacing(self): # Scaling down from 3..12 to", "[2, 3, 8]], [[4, 7, 8], [8, 9, 10, 11],", "self.assertEqual(len(result['caps']), 2) def test_plot_box_data_empty(self): fig, ax = plt.subplots() result =", "Copyright (c) 2013--, scikit-bio development team. 
# # Distributed under", "label\", \"y-axis label\") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), \"Test\") self.assertEqual(ax.get_xlabel(), \"x-axis", "4, 10, 11], [\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"],", "[\"Infants\", \"Children\", \"Teens\"], ['b', 'g', 'y'], \"x-axis label\", \"y-axis label\",", "'yellow']) ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) # patch colors should", "= plt.subplots() _create_legend(ax, ['b', 'r'], ['dist1', 'dist2'], 'colors') self.assertEqual(len(ax.get_legend().get_texts()), 2)", "np.array([1, 1.58296893, 3])) def test_calc_data_point_ticks(self): ticks = _calc_data_point_ticks(np.array([1, 5, 9,", "\"Children\", \"Teens\"], ['b', 'g', 'y'], \"x-axis label\", \"y-axis label\", \"Test\")", "invalid number of samples in data list (for bar/scatter plots).", "1.58296893, 3])) def test_calc_data_point_ticks(self): ticks = _calc_data_point_ticks(np.array([1, 5, 9, 11]),", "_validate_input([[[1, 2, 3], [4, 5]], []], None, None, None) def", "self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) lines = ax.get_lines() self.assertTrue(np.isnan(lines[0].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[16].get_xydata()[0][1]))", "result = _plot_scatter_data(ax, [1, 2, 3], '^', 0.77, 1, 1.5,", "_create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'], 'foo') def test_grouped_distributions_bar(self):", "are None. fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax,", "from skbio.draw._distributions import ( _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color,", "with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division,", "Number of colors doesn't match number of distributions. with npt.assert_raises(ValueError):", "# https://github.com/pydata/pandas/issues/8382#issuecomment-56840974 # https://github.com/matplotlib/matplotlib/issues/3544 self.assertTrue(len(result['fliers']) == 1 or len(result['fliers']) ==", "since it is hidden. # boxplots in mpl < 1.4.0", "1.4.0 creates one, # though the resulting plot looks identical", "file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__", "self.assertEqual(len(result['whiskers']), 2) # mpl < 1.4.0 creates two Line2D instances,", "'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') orig_fig_size = fig.get_size_inches() _set_figure_size(fig)", "None, None, None) def test_validate_input_invalid_num_samples(self): with npt.assert_raises(ValueError): _validate_input(self.InvalidNumSamples, None, None,", "self.assertTrue(np.isnan(ax.patches[2].xy[0][1])) def test_boxplots_invalid_input(self): # Non-numeric entries in distribution. 
with npt.assert_raises(ValueError):", "ax = plt.subplots() result = _plot_scatter_data(ax, [], '^', 0.77, 1,", "= _plot_scatter_data(ax, [], '^', 0.77, 1, 1.5, 'stdv') self.assertTrue(result is", "= ax.get_lines() self.assertTrue(np.isnan(lines[0].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[16].get_xydata()[0][1])) def test_boxplots_box_colors(self): # Coloring works", "0.0, 1.0, 1.0)) self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertEqual(ax.patches[2].get_facecolor(), (1.0,", "lines = ax.get_lines() self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) # line in first distribution should", "label\") self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) def test_boxplots_empty_distributions(self): fig", "self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'colors', None, 10), ['b', 'g', 'r', 'c', 'm',", "_set_figure_size(fig, 3, 4) self.assertTrue(np.array_equal(fig.get_size_inches(), (3, 4))) def test_set_figure_size_defaults(self): fig, ax", "of distributions. with npt.assert_raises(ValueError): boxplots([[1, 2, 3], [], [4, 5,", "None, len(self.ValidSingleSampleData)) def test_validate_x_values_valid_x_values(self): _validate_x_values([1, 2.0, 3], None, 3) def", "None # Test empty data list. self.Empty = [] #", "args = ('bar', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\",", "(1.0, 0.0, 0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) fig, ax = plt.subplots()", "box_plot = plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError): _color_box_plot(ax, box_plot, ['blue', (1, 1,", "11], [\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['b', 'r'],", "def setUp(self): # Test null data list. self.Null = None", "'dist2', 'dist3'], 'foo') def test_grouped_distributions_bar(self): fig = grouped_distributions('bar', self.ValidTypicalData, [1,", "= fig.get_size_inches() _set_figure_size(fig, -1, 0) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def test_set_figure_size_long_labels(self): fig,", "3.33333333, 4])) # Sorted order shouldn't affect scaling. locs =", "bar/scatter plots). 
self.InvalidNumSamples = [[[1, 2, 3, 4, 5]], [[4,", "self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0)) self.assertTrue(np.isnan(ax.patches[0].xy[0][1])) self.assertFalse(np.isnan(ax.patches[1].xy[0][1])) self.assertTrue(np.isnan(ax.patches[2].xy[0][1])) def test_boxplots_invalid_input(self):", "10, 33, 32, 6, 7, 8]]] # Test typical data", "def test_color_box_plot(self): fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax,", "1.0, 1.0))) self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0))) self.assertTrue(_is_single_matplotlib_color((2.0, 1, 1.0))) self.assertFalse(_is_single_matplotlib_color(['w', 'r']))", "plt.subplots() result = _plot_box_data(ax, [], 'blue', 0.33, 55, 1.5, 'stdv')", "label\", \"y-axis label\", x_tick_labels=[\"T0\", \"T1\", \"T2\"], y_min='car', y_max=30) def test_set_axes_options_invalid_x_tick_labels_orientation(self):", "index 8 should have a nan for its y #", "plt.subplots() _set_axes_options(ax, \"Plot Title\", \"x-axis label\", \"y-axis label\", x_values=[42, 45,", "2, 3, 4, 5]], [[4, 5, 6, 7, 8], [2,", "len(self.ValidSingleSampleData)) def test_validate_x_values_valid_x_values(self): _validate_x_values([1, 2.0, 3], None, 3) def test_get_distribution_markers_null_marker_list(self):", "ax = plt.subplots() _set_axes_options(ax, \"Plot Title\", \"x-axis label\", \"y-axis label\",", "[]], box_colors=['blue', 'red', 'yellow']) ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) self.assertEqual(ax.patches[0].get_facecolor(),", "_plot_scatter_data(ax, [1, 2, 3], '^', 0.77, 1, 1.5, 'stdv') self.assertEqual(result.get_sizes(),", "self.assertEqual(ax.get_title(), \"Plot Title\") self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(ax.get_xticklabels()[0].get_text(), \"T0\") self.assertEqual(ax.get_xticklabels()[1].get_text(), \"T1\")", "\"Children\", \"Teens\"], ['^'], \"x-axis label\", \"y-axis label\", \"Test\") npt.assert_warns(RuntimeWarning, grouped_distributions,", "nans since it is hidden. # boxplots in mpl <", "\"Test\") def test_grouped_distributions_negative_distribution_width(self): args = ('box', self.ValidTypicalData, [1, 4, 10,", "1.0, 1),))) self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1), (0.9, 0.9)))) def test_set_figure_size(self): fig,", "[1, 4, 10])) def test_boxplots_empty_distributions(self): fig = boxplots([[1, 2, 3],", "def test_get_distribution_markers_insufficient_markers(self): self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'colors', None, 10), ['b', 'g', 'r',", "1.5, 'stdv') self.assertEqual(result.get_sizes(), 20) def test_plot_scatter_data_empty(self): fig, ax = plt.subplots()", "False) np.testing.assert_allclose(ticks, [0.75]) def test_set_axes_options(self): fig, ax = plt.subplots() _set_axes_options(ax,", "label\", x_values=[42, 45, 800]) self.assertEqual(ax.get_title(), \"Plot Title\") self.assertEqual(ax.get_ylabel(), \"y-axis label\")", "# patch colors should match what we specified self.assertEqual(ax.patches[0].get_facecolor(), (0.0,", "(1, 1, 0.9)]) # Some colors are None. fig, ax", "= None # Test empty data list. 
self.Empty = []", "None) def test_plot_scatter_data(self): fig, ax = plt.subplots() result = _plot_scatter_data(ax,", "len(self.ValidSingleSampleData)) def test_validate_x_values_invalid_x_tick_labels(self): with npt.assert_raises(ValueError): _validate_x_values(None, [\"T0\"], len(self.ValidSingleSampleData)) def test_validate_x_values_nonnumber_x_values(self):", "['^', '>', '<'], \"x-axis label\", \"y-axis label\", \"Test\") ax =", "ax = plt.subplots() with npt.assert_raises(ValueError): _set_axes_options(ax, \"Plot Title\", \"x-axis label\",", "x_tick_labels_orientation='vertical') orig_fig_size = fig.get_size_inches() _set_figure_size(fig) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def test_set_figure_size_invalid(self): fig,", "# Test nested empty data list. self.EmptyNested = [[]] #", "10])) def test_boxplots_empty_distributions(self): fig = boxplots([[1, 2, 3], [], [4,", "def test_boxplots_invalid_input(self): # Non-numeric entries in distribution. with npt.assert_raises(ValueError): boxplots([[1,", "setUp(self): # Test null data list. self.Null = None #", "self.assertFalse(np.isnan(ax.patches[1].xy[0][1])) self.assertTrue(np.isnan(ax.patches[2].xy[0][1])) def test_boxplots_invalid_input(self): # Non-numeric entries in distribution. with", "3, 4, 5]], [[4, 5, 6, 7, 8]], [[4, 7,", "1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) def test_plot_bar_data_bad_error_bar_type(self): fig, ax = plt.subplots() with", "[1, 3, 4, 8], [\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\",", "\"Test\") npt.assert_warns(RuntimeWarning, grouped_distributions, *args) def test_grouped_distributions_empty_marker_list(self): grouped_distributions('scatter', self.ValidTypicalData, [1, 4,", "Modified BSD License. # # The full license is in", "x_tick_labels=[\"T0\", \"T1\"]) self.assertEqual(ax.get_title(), \"Plot Title\") self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(ax.get_xticklabels()[0].get_text(), \"T0\")", "np import numpy.testing as npt import matplotlib.pyplot as plt from", "# Scaling down from 3..12 to 1..4. 
locs = _calc_data_point_locations(4,", "3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) lines = ax.get_lines() self.assertTrue(np.isnan(lines[0].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[8].get_xydata()[0][1]))", "division, print_function from unittest import TestCase, main import numpy as", "self.assertEqual(len(ax.get_legend().get_texts()), 2) fig, ax = plt.subplots() _create_legend(ax, ['^', '<', '>'],", "matplotlib.pyplot as plt from skbio.draw import boxplots, grouped_distributions from skbio.draw._distributions", "def test_validate_input_all_valid_input(self): self.assertEqual(_validate_input(self.ValidTypicalData, [1, 3, 4, 8], [\"T0\", \"T1\", \"T2\",", "2) fig, ax = plt.subplots() _create_legend(ax, ['^', '<', '>'], ['dist1',", "= plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, ['blue', 'w', (1,", "\"Data 3\"], \"Test\", \"x-axis label\", \"y-axis label\", legend=(('blue', 'red'), ('foo',", "test_validate_input_empty_deeply_nested(self): num_points, num_samples = _validate_input(self.EmptyDeeplyNested, None, None, None) self.assertEqual(num_points, 1)", "self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) lines = ax.get_lines() self.assertTrue(np.isnan(lines[0].get_xydata()[0][1]))", "False) np.testing.assert_allclose(ticks, [1.25, 5.25, 9.25, 11.25]) ticks = _calc_data_point_ticks(np.array([0]), 3,", "\"y-axis label\", \"Test\") npt.assert_warns(RuntimeWarning, grouped_distributions, *args) def test_grouped_distributions_scatter(self): fig =", "['red', 'foobarbaz', 'blue']) # Wrong number of colors. fig, ax", "self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) #", "empty distributions. fig = boxplots([[], [], []], box_colors=['blue', 'red', 'yellow'])", "label\") self.assertEqual(ax.get_xticklabels()[0].get_text(), '42') self.assertEqual(ax.get_xticklabels()[1].get_text(), '45') self.assertEqual(ax.get_xticklabels()[2].get_text(), '800') def test_set_axes_options_bad_ylim(self): fig,", "5]], []], None, None, None) def test_validate_input_invalid_num_samples(self): with npt.assert_raises(ValueError): _validate_input(self.InvalidNumSamples,", "4, 1, 1]], [[4, 33, 32, 6, 8], [5, 4,", "0.0, 1.0)) # patch location should include at least one", "'sem') self.assertTrue(result is None) def test_plot_scatter_data(self): fig, ax = plt.subplots()", "with npt.assert_raises(ValueError): grouped_distributions('pie', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\",", "distributed with this software. 
# ---------------------------------------------------------------------------- from __future__ import absolute_import,", "'dist3'], 'symbols') self.assertEqual(len(ax.get_legend().get_texts()), 3) def test_create_legend_invalid_input(self): fig, ax = plt.subplots()", "ax = plt.subplots() result = _plot_box_data(ax, [], 'blue', 0.33, 55,", "num_points, num_samples = _validate_input(self.EmptyDeeplyNested, None, None, None) self.assertEqual(num_points, 1) self.assertEqual(num_samples,", "npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, None, [\"Men\", \"Women\"]) def test_validate_input_all_valid_input(self): self.assertEqual(_validate_input(self.ValidTypicalData, [1,", "= plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, ['blue', None, (1, 1, 0.9)]) #", "def test_grouped_distributions_insufficient_symbols(self): args = ('scatter', self.ValidTypicalData, [1, 4, 10, 11],", "while mpl 1.4.0 has # 7. in either case, the", "_get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values) class", "def test_set_axes_options(self): fig, ax = plt.subplots() _set_axes_options(ax, \"Plot Title\", \"x-axis", "# Invalid legend. with npt.assert_raises(ValueError): boxplots([[1, 2, 3]], legend=('foo', 'bar',", "2013--, scikit-bio development team. # # Distributed under the terms", "test_get_distribution_markers_empty_marker_list(self): self.assertEqual(_get_distribution_markers('colors', None, 4), ['b', 'g', 'r', 'c']) def test_get_distribution_markers_insufficient_markers(self):", "_calc_data_point_locations(3, [0.001, 0.2543, 0.87]) np.testing.assert_allclose(locs, np.array([1, 1.58296893, 3])) def test_calc_data_point_ticks(self):", "plots). self.ValidSingleSampleData = [[[1, 2, 3, 4, 5]], [[4, 5,", "def test_get_distribution_markers_bad_marker_type(self): with npt.assert_raises(ValueError): _get_distribution_markers('shapes', [], 3) def test_get_distribution_markers_zero_markers(self): self.assertEqual(_get_distribution_markers('symbols',", "None, None, None) def test_validate_input_empty_nested(self): with npt.assert_raises(ValueError): _validate_input(self.EmptyNested, None, None,", "[4, 5, 6]], box_colors=['blue', 'red']) # Invalid legend. with npt.assert_raises(ValueError):", "10], [\"Data 1\", \"Data 2\", \"Data 3\"], \"Test\", \"x-axis label\",", "= _calc_data_point_locations(4, [3, 4, 10, 12]) np.testing.assert_allclose(locs, np.array([1, 1.33333333, 3.33333333,", "null data list. self.Null = None # Test empty data", "'g', 'r']) self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'symbols', ['^', '>', '<'], 5), ['^',", "self.InvalidNumSamples = [[[1, 2, 3, 4, 5]], [[4, 5, 6,", "3.33333333])) # Scaling up from 0.001..0.87 to 1..3. locs =", "None, None, None) def test_validate_input_invalid_data_point_names(self): with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, [\"T0\",", "3, 4, 8], [\"T0\", \"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"]),", "4, 10])) # second distribution (empty) should have nans since", "12.0, 2, 2, 99.99], [2.3, 4, 5, 88, 9, 10,", "'colors') self.assertEqual(len(ax.get_legend().get_texts()), 2) fig, ax = plt.subplots() _create_legend(ax, ['^', '<',", "COPYING.txt, distributed with this software. 
# ---------------------------------------------------------------------------- from __future__ import", "33, 32, 6, 8], [5, 4, 8, 13], [1, 1,", "3], 'red', 0.5, 3.75, 1.5, 'var') def test_plot_bar_data_empty(self): fig, ax", "# Sorted order shouldn't affect scaling. locs = _calc_data_point_locations(4, [4,", "import numpy as np import numpy.testing as npt import matplotlib.pyplot", "_color_box_plot(ax, box_plot, ['red', 'foobarbaz', 'blue']) # Wrong number of colors.", "# We get a warning from mpl if we don't", "import matplotlib.pyplot as plt from skbio.draw import boxplots, grouped_distributions from", "self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) # second distribution (empty)", "'<', '^', '>']) def test_get_distribution_markers_bad_marker_type(self): with npt.assert_raises(ValueError): _get_distribution_markers('shapes', [], 3)", "11.67, 12.0, 2, 2, 99.99], [2.3, 4, 5, 88, 9,", "2, 3, 4, 5]], [[4, 5, 6, 7, 8]], [[4,", "ax = plt.subplots() result = _plot_bar_data(ax, [], 'red', 0.5, 3.75,", "with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, [\"T0\", \"T1\"], None) def test_validate_input_invalid_sample_names(self): with", "# patch location should include at least one nan since", "self.assertEqual(ax.get_title(), \"Plot Title\") self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(ax.get_xticklabels()[0].get_text(), '42') self.assertEqual(ax.get_xticklabels()[1].get_text(), '45')", "np.testing.assert_allclose(ticks, [1.25, 5.25, 9.25, 11.25]) ticks = _calc_data_point_ticks(np.array([0]), 3, 0.5,", "'<', '>'], ['dist1', 'dist2', 'dist3'], 'symbols') self.assertEqual(len(ax.get_legend().get_texts()), 3) def test_create_legend_invalid_input(self):", "['b', 'g', 'y'], \"x-axis label\", \"y-axis label\", \"Test\") ax =", "team. # # Distributed under the terms of the Modified", "0), []) def test_get_distribution_markers_negative_num_markers(self): with npt.assert_raises(ValueError): _get_distribution_markers('symbols', [], -1) def", "the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from", "def test_validate_input_empty_deeply_nested(self): num_points, num_samples = _validate_input(self.EmptyDeeplyNested, None, None, None) self.assertEqual(num_points,", "\"T1\", \"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], ['b', 'r'], \"x-axis label\",", "# Coloring works with all empty distributions. fig = boxplots([[],", "self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) def", "self.assertEqual(result.__class__.__name__, \"dict\") self.assertEqual(len(result['boxes']), 1) self.assertEqual(len(result['medians']), 1) self.assertEqual(len(result['whiskers']), 2) # mpl", "\"T3\"], [\"Infants\", \"Children\", \"Teens\"], [], \"x-axis label\", \"y-axis label\", \"Test\")", "grouped_distributions('bar', self.ValidTypicalData, [1, 4, 10, 11], [\"T0\", \"T1\", \"T2\", \"T3\"],", "2, 3], [], [4, 5, 6]], box_colors=['blue', 'red']) # Invalid", "boxplots([[], [], []], [1, 4, 10], [\"Data 1\", \"Data 2\",", "self.assertTrue(len(result['fliers']) == 1 or len(result['fliers']) == 2) self.assertEqual(len(result['caps']), 2) def", "None, None]) def test_color_box_plot_invalid_input(self): # Invalid color. fig, ax =", "Coloring works with all empty distributions. 
fig = boxplots([[], [],", "self.assertTrue(_is_single_matplotlib_color((1, 1, 1))) self.assertTrue(_is_single_matplotlib_color((1, 1, 1, 1))) self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0,", "45, 800]) self.assertEqual(ax.get_title(), \"Plot Title\") self.assertEqual(ax.get_ylabel(), \"y-axis label\") self.assertEqual(ax.get_xticklabels()[0].get_text(), '42')", "# Test typical data to be plotted by the boxplot", "data list (for bar/scatter plots). self.InvalidNumSamples = [[[1, 2, 3,", "'^', 0.77, 1, 1.5, 'stdv') self.assertEqual(result.get_sizes(), 20) def test_plot_scatter_data_empty(self): fig,", "'dist2'], 'symbols') with npt.assert_raises(ValueError): _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2',", "44], 'blue', 0.33, 55, 1.5, 'stdv') self.assertEqual(result.__class__.__name__, \"dict\") self.assertEqual(len(result['boxes']), 1)", "\"T2\", \"T3\"], [\"Infants\", \"Children\", \"Teens\"], [], \"x-axis label\", \"y-axis label\",", "(1.0, 0.0, 0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) def test_plot_bar_data_bad_error_bar_type(self): fig, ax", "unittest import TestCase, main import numpy as np import numpy.testing", "shouldn't affect scaling. locs = _calc_data_point_locations(4, [4, 3, 12, 10])", "_calc_data_point_locations(4, [3, 4, 10, 12]) np.testing.assert_allclose(locs, np.array([1, 1.33333333, 3.33333333, 4]))", "plt.subplots() result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'stdv')", "['blue', None, (1, 1, 0.9)]) # All colors are None.", "Scaling up from 0.001..0.87 to 1..3. locs = _calc_data_point_locations(3, [0.001,", "fig = boxplots([[], [], []], box_colors='pink') ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()),", "1))) self.assertTrue(_is_single_matplotlib_color((1, 1, 1, 1))) self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0, 1.0))) self.assertTrue(_is_single_matplotlib_color((1.0,", "\"Children\", \"Teens\"], ['^', '>', '<'], \"x-axis label\", \"y-axis label\", \"Test\")", "ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0))", "4), ['b', 'g', 'r', 'c']) def test_get_distribution_markers_insufficient_markers(self): self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'colors',", "two Line2D instances, mpl 1.4.0 creates one, # though the" ]
[ "<gh_stars>10-100 from lmsrvcore.api.interfaces.user import User from lmsrvcore.api.interfaces.git import GitCommit, GitRef,", "from lmsrvcore.api.interfaces.user import User from lmsrvcore.api.interfaces.git import GitCommit, GitRef, GitRepository" ]
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "agreed to in writing, software # distributed under the License", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Unless required by applicable law or agreed to in writing,", "# ============================================================================ \"\"\"Tests for Bijector.\"\"\" from __future__ import absolute_import from", "bijector, lower_x=1e-3, upper_x=1.5, eval_func=self.evaluate, rtol=0.05) def testShapeGetters(self): bijector = tfb.Invert(", "= tfb.Invert( tfb.SoftmaxCentered(validate_args=True)) x = tf.TensorShape([2]) y = tf.TensorShape([1]) self.assertAllEqual(y,", "self.assertAllEqual( tensorshape_util.as_list(x), self.evaluate( bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y)))) def testDocstringExample(self): exp_gamma_distribution = ( tfd.TransformedDistribution(", "permissions and # limitations under the License. # ============================================================================ \"\"\"Tests", "distributed under the License is distributed on an \"AS IS\"", "scale_diag=[2., 3.]), tfb.Softplus(), tfb.SoftmaxCentered(), ]: rev = tfb.Invert(fwd) self.assertStartsWith(rev.name, \"_\".join([\"invert\",", "def testDocstringExample(self): exp_gamma_distribution = ( tfd.TransformedDistribution( distribution=tfd.Gamma(concentration=1., rate=2.), bijector=tfb.Invert(tfb.Exp()))) self.assertAllEqual(", "import distributions as tfd from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.internal", "the Y = Invert(bij) transformation.\"\"\" def testBijector(self): for fwd in", "tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import", "the specific language governing permissions and # limitations under the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "as tfp_test_util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes", "tensorshape_util from tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow.python.framework import", "tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes class InvertBijectorTest(tf.test.TestCase): \"\"\"Tests", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "except in compliance with the License. 
# You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "bijector_test_util from tensorflow_probability.python.internal import tensorshape_util from tensorflow_probability.python.internal import test_util as", "= ( tfd.TransformedDistribution( distribution=tfd.Gamma(concentration=1., rate=2.), bijector=tfb.Invert(tfb.Exp()))) self.assertAllEqual( [], self.evaluate( tf.shape(", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "bijector=tfb.Invert(tfb.Exp()))) self.assertAllEqual( [], self.evaluate( tf.shape( exp_gamma_distribution.sample(seed=tfp_test_util.test_seed())))) if __name__ == \"__main__\":", "not use this file except in compliance with the License.", "tfd from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.internal import tensorshape_util from", "tfb.Exp(), tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]), tfb.Softplus(), tfb.SoftmaxCentered(), ]: rev =", "writing, software # distributed under the License is distributed on", "3.]]] self.assertAllClose( self.evaluate(fwd.inverse(x)), self.evaluate(rev.forward(x))) self.assertAllClose( self.evaluate(fwd.forward(x)), self.evaluate(rev.inverse(x))) self.assertAllClose( self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)),", "in writing, software # distributed under the License is distributed", "self.assertAllClose( self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1))) self.assertAllClose( self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1)))", "you may not use this file except in compliance with", "from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.internal import tensorshape_util from tensorflow_probability.python.internal", "rev = tfb.Invert(fwd) self.assertStartsWith(rev.name, \"_\".join([\"invert\", fwd.name])) x = [[[1., 2.],", "testScalarCongruency(self): bijector = tfb.Invert(tfb.Exp()) bijector_test_util.assert_scalar_congruency( bijector, lower_x=1e-3, upper_x=1.5, eval_func=self.evaluate, rtol=0.05)", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "= Invert(bij) transformation.\"\"\" def testBijector(self): for fwd in [ tfb.Identity(),", "event_ndims=1))) def testScalarCongruency(self): bijector = tfb.Invert(tfb.Exp()) bijector_test_util.assert_scalar_congruency( bijector, lower_x=1e-3, upper_x=1.5,", "bijector.inverse_event_shape(y)) self.assertAllEqual( tensorshape_util.as_list(x), self.evaluate( bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y)))) def testDocstringExample(self): exp_gamma_distribution = (", "# Copyright 2018 The TensorFlow Probability Authors. 
# # Licensed", "tfb.Invert(fwd) self.assertStartsWith(rev.name, \"_\".join([\"invert\", fwd.name])) x = [[[1., 2.], [2., 3.]]]", "event_ndims=1)), self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1))) self.assertAllClose( self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1))) def testScalarCongruency(self):", "fwd in [ tfb.Identity(), tfb.Exp(), tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]), tfb.Softplus(),", "self.assertAllClose( self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1))) def testScalarCongruency(self): bijector = tfb.Invert(tfb.Exp())", "from __future__ import print_function import tensorflow.compat.v2 as tf from tensorflow_probability.python", "import tensorshape_util from tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow.python.framework", "tfb.SoftmaxCentered(), ]: rev = tfb.Invert(fwd) self.assertStartsWith(rev.name, \"_\".join([\"invert\", fwd.name])) x =", "import bijectors as tfb from tensorflow_probability.python import distributions as tfd", "use this file except in compliance with the License. #", "pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes class InvertBijectorTest(tf.test.TestCase): \"\"\"Tests the correctness of the", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "1.], scale_diag=[2., 3.]), tfb.Softplus(), tfb.SoftmaxCentered(), ]: rev = tfb.Invert(fwd) self.assertStartsWith(rev.name,", "self.assertAllClose( self.evaluate(fwd.inverse(x)), self.evaluate(rev.forward(x))) self.assertAllClose( self.evaluate(fwd.forward(x)), self.evaluate(rev.inverse(x))) self.assertAllClose( self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.inverse_log_det_jacobian(x,", "self.assertAllClose( self.evaluate(fwd.forward(x)), self.evaluate(rev.inverse(x))) self.assertAllClose( self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1))) self.assertAllClose( self.evaluate(fwd.inverse_log_det_jacobian(x,", "testShapeGetters(self): bijector = tfb.Invert( tfb.SoftmaxCentered(validate_args=True)) x = tf.TensorShape([2]) y =", "Authors. # # Licensed under the Apache License, Version 2.0", "x = [[[1., 2.], [2., 3.]]] self.assertAllClose( self.evaluate(fwd.inverse(x)), self.evaluate(rev.forward(x))) self.assertAllClose(", "import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes class InvertBijectorTest(tf.test.TestCase): \"\"\"Tests the", "2018 The TensorFlow Probability Authors. # # Licensed under the", "tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from", "self.evaluate(fwd.forward(x)), self.evaluate(rev.inverse(x))) self.assertAllClose( self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1))) self.assertAllClose( self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)),", "CONDITIONS OF ANY KIND, either express or implied. # See", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "or implied. 
# See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "from tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow.python.framework import test_util", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "Invert(bij) transformation.\"\"\" def testBijector(self): for fwd in [ tfb.Identity(), tfb.Exp(),", "def testScalarCongruency(self): bijector = tfb.Invert(tfb.Exp()) bijector_test_util.assert_scalar_congruency( bijector, lower_x=1e-3, upper_x=1.5, eval_func=self.evaluate,", "# You may obtain a copy of the License at", "bijector = tfb.Invert( tfb.SoftmaxCentered(validate_args=True)) x = tf.TensorShape([2]) y = tf.TensorShape([1])", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow.python.framework import test_util #", "bijector.forward_event_shape_tensor(tensorshape_util.as_list(x)))) self.assertAllEqual(x, bijector.inverse_event_shape(y)) self.assertAllEqual( tensorshape_util.as_list(x), self.evaluate( bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y)))) def testDocstringExample(self): exp_gamma_distribution", "from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes class InvertBijectorTest(tf.test.TestCase):", "and # limitations under the License. # ============================================================================ \"\"\"Tests for", "from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.bijectors import bijector_test_util", "self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1))) self.assertAllClose( self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1))) def", "under the License is distributed on an \"AS IS\" BASIS,", "import test_util as tfp_test_util from tensorflow.python.framework import test_util # pylint:", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "testBijector(self): for fwd in [ tfb.Identity(), tfb.Exp(), tfb.Affine(shift=[0., 1.], scale_diag=[2.,", "License for the specific language governing permissions and # limitations", "from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions", "distributions as tfd from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.internal import", "self.evaluate(rev.forward(x))) self.assertAllClose( self.evaluate(fwd.forward(x)), self.evaluate(rev.inverse(x))) self.assertAllClose( self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1))) self.assertAllClose(", "as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python", "tfd.TransformedDistribution( distribution=tfd.Gamma(concentration=1., rate=2.), bijector=tfb.Invert(tfb.Exp()))) self.assertAllEqual( [], self.evaluate( tf.shape( exp_gamma_distribution.sample(seed=tfp_test_util.test_seed())))) if", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", 
"tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.internal import tensorshape_util from tensorflow_probability.python.internal import", "License. # ============================================================================ \"\"\"Tests for Bijector.\"\"\" from __future__ import absolute_import", "= tf.TensorShape([1]) self.assertAllEqual(y, bijector.forward_event_shape(x)) self.assertAllEqual( tensorshape_util.as_list(y), self.evaluate( bijector.forward_event_shape_tensor(tensorshape_util.as_list(x)))) self.assertAllEqual(x, bijector.inverse_event_shape(y))", "rtol=0.05) def testShapeGetters(self): bijector = tfb.Invert( tfb.SoftmaxCentered(validate_args=True)) x = tf.TensorShape([2])", "self.evaluate(fwd.inverse(x)), self.evaluate(rev.forward(x))) self.assertAllClose( self.evaluate(fwd.forward(x)), self.evaluate(rev.inverse(x))) self.assertAllClose( self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1)))", "self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1))) def testScalarCongruency(self): bijector = tfb.Invert(tfb.Exp()) bijector_test_util.assert_scalar_congruency( bijector, lower_x=1e-3,", "upper_x=1.5, eval_func=self.evaluate, rtol=0.05) def testShapeGetters(self): bijector = tfb.Invert( tfb.SoftmaxCentered(validate_args=True)) x", "exp_gamma_distribution = ( tfd.TransformedDistribution( distribution=tfd.Gamma(concentration=1., rate=2.), bijector=tfb.Invert(tfb.Exp()))) self.assertAllEqual( [], self.evaluate(", "self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1))) self.assertAllClose( self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1))) def testScalarCongruency(self): bijector", "the License. # ============================================================================ \"\"\"Tests for Bijector.\"\"\" from __future__ import", "the License for the specific language governing permissions and #", "(the \"License\"); # you may not use this file except", "bijector_test_util.assert_scalar_congruency( bijector, lower_x=1e-3, upper_x=1.5, eval_func=self.evaluate, rtol=0.05) def testShapeGetters(self): bijector =", "Apache License, Version 2.0 (the \"License\"); # you may not", "disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes class InvertBijectorTest(tf.test.TestCase): \"\"\"Tests the correctness of the Y", "# you may not use this file except in compliance", "either express or implied. # See the License for the", "transformation.\"\"\" def testBijector(self): for fwd in [ tfb.Identity(), tfb.Exp(), tfb.Affine(shift=[0.,", "from __future__ import division from __future__ import print_function import tensorflow.compat.v2", "( tfd.TransformedDistribution( distribution=tfd.Gamma(concentration=1., rate=2.), bijector=tfb.Invert(tfb.Exp()))) self.assertAllEqual( [], self.evaluate( tf.shape( exp_gamma_distribution.sample(seed=tfp_test_util.test_seed()))))", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "y = tf.TensorShape([1]) self.assertAllEqual(y, bijector.forward_event_shape(x)) self.assertAllEqual( tensorshape_util.as_list(y), self.evaluate( bijector.forward_event_shape_tensor(tensorshape_util.as_list(x)))) self.assertAllEqual(x,", "event_ndims=1))) self.assertAllClose( self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1))) def testScalarCongruency(self): bijector =", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "[ tfb.Identity(), tfb.Exp(), tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]), tfb.Softplus(), tfb.SoftmaxCentered(), ]:", "the License is distributed on an \"AS IS\" BASIS, #", "from __future__ import absolute_import from __future__ import division from __future__", "fwd.name])) x = [[[1., 2.], [2., 3.]]] self.assertAllClose( self.evaluate(fwd.inverse(x)), self.evaluate(rev.forward(x)))", "tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]), tfb.Softplus(), tfb.SoftmaxCentered(), ]: rev = tfb.Invert(fwd)", "tfb.SoftmaxCentered(validate_args=True)) x = tf.TensorShape([2]) y = tf.TensorShape([1]) self.assertAllEqual(y, bijector.forward_event_shape(x)) self.assertAllEqual(", "in compliance with the License. # You may obtain a", "tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as", "for fwd in [ tfb.Identity(), tfb.Exp(), tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]),", "software # distributed under the License is distributed on an", "tfb from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.bijectors import", "= [[[1., 2.], [2., 3.]]] self.assertAllClose( self.evaluate(fwd.inverse(x)), self.evaluate(rev.forward(x))) self.assertAllClose( self.evaluate(fwd.forward(x)),", "TensorFlow Probability Authors. 
# # Licensed under the Apache License,", "rate=2.), bijector=tfb.Invert(tfb.Exp()))) self.assertAllEqual( [], self.evaluate( tf.shape( exp_gamma_distribution.sample(seed=tfp_test_util.test_seed())))) if __name__ ==", "as tfb from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.bijectors", "Bijector.\"\"\" from __future__ import absolute_import from __future__ import division from", "eval_func=self.evaluate, rtol=0.05) def testShapeGetters(self): bijector = tfb.Invert( tfb.SoftmaxCentered(validate_args=True)) x =", "# # Unless required by applicable law or agreed to", "= tfb.Invert(fwd) self.assertStartsWith(rev.name, \"_\".join([\"invert\", fwd.name])) x = [[[1., 2.], [2.,", "tfb.Softplus(), tfb.SoftmaxCentered(), ]: rev = tfb.Invert(fwd) self.assertStartsWith(rev.name, \"_\".join([\"invert\", fwd.name])) x", "tfb.Invert( tfb.SoftmaxCentered(validate_args=True)) x = tf.TensorShape([2]) y = tf.TensorShape([1]) self.assertAllEqual(y, bijector.forward_event_shape(x))", "[[[1., 2.], [2., 3.]]] self.assertAllClose( self.evaluate(fwd.inverse(x)), self.evaluate(rev.forward(x))) self.assertAllClose( self.evaluate(fwd.forward(x)), self.evaluate(rev.inverse(x)))", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "============================================================================ \"\"\"Tests for Bijector.\"\"\" from __future__ import absolute_import from __future__", "self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1))) def testScalarCongruency(self): bijector = tfb.Invert(tfb.Exp()) bijector_test_util.assert_scalar_congruency(", "The TensorFlow Probability Authors. # # Licensed under the Apache", "bijector.forward_event_shape(x)) self.assertAllEqual( tensorshape_util.as_list(y), self.evaluate( bijector.forward_event_shape_tensor(tensorshape_util.as_list(x)))) self.assertAllEqual(x, bijector.inverse_event_shape(y)) self.assertAllEqual( tensorshape_util.as_list(x), self.evaluate(", "Version 2.0 (the \"License\"); # you may not use this", "Copyright 2018 The TensorFlow Probability Authors. # # Licensed under", "@test_util.run_all_in_graph_and_eager_modes class InvertBijectorTest(tf.test.TestCase): \"\"\"Tests the correctness of the Y =", "3.]), tfb.Softplus(), tfb.SoftmaxCentered(), ]: rev = tfb.Invert(fwd) self.assertStartsWith(rev.name, \"_\".join([\"invert\", fwd.name]))", "Probability Authors. 
# # Licensed under the Apache License, Version", "in [ tfb.Identity(), tfb.Exp(), tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]), tfb.Softplus(), tfb.SoftmaxCentered(),", "import print_function import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors", "self.assertAllEqual( tensorshape_util.as_list(y), self.evaluate( bijector.forward_event_shape_tensor(tensorshape_util.as_list(x)))) self.assertAllEqual(x, bijector.inverse_event_shape(y)) self.assertAllEqual( tensorshape_util.as_list(x), self.evaluate( bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y))))", "law or agreed to in writing, software # distributed under", "tfp_test_util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes class", "tensorshape_util.as_list(x), self.evaluate( bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y)))) def testDocstringExample(self): exp_gamma_distribution = ( tfd.TransformedDistribution( distribution=tfd.Gamma(concentration=1.,", "tf.TensorShape([1]) self.assertAllEqual(y, bijector.forward_event_shape(x)) self.assertAllEqual( tensorshape_util.as_list(y), self.evaluate( bijector.forward_event_shape_tensor(tensorshape_util.as_list(x)))) self.assertAllEqual(x, bijector.inverse_event_shape(y)) self.assertAllEqual(", "of the Y = Invert(bij) transformation.\"\"\" def testBijector(self): for fwd", "absolute_import from __future__ import division from __future__ import print_function import", "__future__ import absolute_import from __future__ import division from __future__ import", "implied. # See the License for the specific language governing", "# pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes class InvertBijectorTest(tf.test.TestCase): \"\"\"Tests the correctness of", "self.assertAllEqual(x, bijector.inverse_event_shape(y)) self.assertAllEqual( tensorshape_util.as_list(x), self.evaluate( bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y)))) def testDocstringExample(self): exp_gamma_distribution =", "under the Apache License, Version 2.0 (the \"License\"); # you", "tensorshape_util.as_list(y), self.evaluate( bijector.forward_event_shape_tensor(tensorshape_util.as_list(x)))) self.assertAllEqual(x, bijector.inverse_event_shape(y)) self.assertAllEqual( tensorshape_util.as_list(x), self.evaluate( bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y)))) def", "\"License\"); # you may not use this file except in", "limitations under the License. 
# ============================================================================ \"\"\"Tests for Bijector.\"\"\" from", "\"\"\"Tests the correctness of the Y = Invert(bij) transformation.\"\"\" def", "for Bijector.\"\"\" from __future__ import absolute_import from __future__ import division", "bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y)))) def testDocstringExample(self): exp_gamma_distribution = ( tfd.TransformedDistribution( distribution=tfd.Gamma(concentration=1., rate=2.), bijector=tfb.Invert(tfb.Exp())))", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "= tfb.Invert(tfb.Exp()) bijector_test_util.assert_scalar_congruency( bijector, lower_x=1e-3, upper_x=1.5, eval_func=self.evaluate, rtol=0.05) def testShapeGetters(self):", "class InvertBijectorTest(tf.test.TestCase): \"\"\"Tests the correctness of the Y = Invert(bij)", "testDocstringExample(self): exp_gamma_distribution = ( tfd.TransformedDistribution( distribution=tfd.Gamma(concentration=1., rate=2.), bijector=tfb.Invert(tfb.Exp()))) self.assertAllEqual( [],", "correctness of the Y = Invert(bij) transformation.\"\"\" def testBijector(self): for", "]: rev = tfb.Invert(fwd) self.assertStartsWith(rev.name, \"_\".join([\"invert\", fwd.name])) x = [[[1.,", "event_ndims=1)), self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1))) def testScalarCongruency(self): bijector = tfb.Invert(tfb.Exp()) bijector_test_util.assert_scalar_congruency( bijector,", "as tfd from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.internal import tensorshape_util", "distribution=tfd.Gamma(concentration=1., rate=2.), bijector=tfb.Invert(tfb.Exp()))) self.assertAllEqual( [], self.evaluate( tf.shape( exp_gamma_distribution.sample(seed=tfp_test_util.test_seed())))) if __name__", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"\"\"Tests for Bijector.\"\"\" from __future__ import absolute_import from __future__ import", "Y = Invert(bij) transformation.\"\"\" def testBijector(self): for fwd in [", "self.assertStartsWith(rev.name, \"_\".join([\"invert\", fwd.name])) x = [[[1., 2.], [2., 3.]]] self.assertAllClose(", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "the correctness of the Y = Invert(bij) transformation.\"\"\" def testBijector(self):", "division from __future__ import print_function import tensorflow.compat.v2 as tf from", "under the License. # ============================================================================ \"\"\"Tests for Bijector.\"\"\" from __future__", "[2., 3.]]] self.assertAllClose( self.evaluate(fwd.inverse(x)), self.evaluate(rev.forward(x))) self.assertAllClose( self.evaluate(fwd.forward(x)), self.evaluate(rev.inverse(x))) self.assertAllClose( self.evaluate(fwd.forward_log_det_jacobian(x,", "= tf.TensorShape([2]) y = tf.TensorShape([1]) self.assertAllEqual(y, bijector.forward_event_shape(x)) self.assertAllEqual( tensorshape_util.as_list(y), self.evaluate(", "the License. 
# You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "# limitations under the License. # ============================================================================ \"\"\"Tests for Bijector.\"\"\"", "def testBijector(self): for fwd in [ tfb.Identity(), tfb.Exp(), tfb.Affine(shift=[0., 1.],", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb", "to in writing, software # distributed under the License is", "import bijector_test_util from tensorflow_probability.python.internal import tensorshape_util from tensorflow_probability.python.internal import test_util", "bijectors as tfb from tensorflow_probability.python import distributions as tfd from", "test_util as tfp_test_util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import", "\"_\".join([\"invert\", fwd.name])) x = [[[1., 2.], [2., 3.]]] self.assertAllClose( self.evaluate(fwd.inverse(x)),", "tfb.Invert(tfb.Exp()) bijector_test_util.assert_scalar_congruency( bijector, lower_x=1e-3, upper_x=1.5, eval_func=self.evaluate, rtol=0.05) def testShapeGetters(self): bijector", "lower_x=1e-3, upper_x=1.5, eval_func=self.evaluate, rtol=0.05) def testShapeGetters(self): bijector = tfb.Invert( tfb.SoftmaxCentered(validate_args=True))", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "__future__ import print_function import tensorflow.compat.v2 as tf from tensorflow_probability.python import", "print_function import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as", "bijector = tfb.Invert(tfb.Exp()) bijector_test_util.assert_scalar_congruency( bijector, lower_x=1e-3, upper_x=1.5, eval_func=self.evaluate, rtol=0.05) def", "tf.TensorShape([2]) y = tf.TensorShape([1]) self.assertAllEqual(y, bijector.forward_event_shape(x)) self.assertAllEqual( tensorshape_util.as_list(y), self.evaluate( bijector.forward_event_shape_tensor(tensorshape_util.as_list(x))))", "You may obtain a copy of the License at #", "import division from __future__ import print_function import tensorflow.compat.v2 as tf", "language governing permissions and # limitations under the License. 
#", "tfb.Identity(), tfb.Exp(), tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]), tfb.Softplus(), tfb.SoftmaxCentered(), ]: rev", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "def testShapeGetters(self): bijector = tfb.Invert( tfb.SoftmaxCentered(validate_args=True)) x = tf.TensorShape([2]) y", "x = tf.TensorShape([2]) y = tf.TensorShape([1]) self.assertAllEqual(y, bijector.forward_event_shape(x)) self.assertAllEqual( tensorshape_util.as_list(y),", "self.evaluate( bijector.forward_event_shape_tensor(tensorshape_util.as_list(x)))) self.assertAllEqual(x, bijector.inverse_event_shape(y)) self.assertAllEqual( tensorshape_util.as_list(x), self.evaluate( bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y)))) def testDocstringExample(self):", "self.evaluate( bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y)))) def testDocstringExample(self): exp_gamma_distribution = ( tfd.TransformedDistribution( distribution=tfd.Gamma(concentration=1., rate=2.),", "required by applicable law or agreed to in writing, software", "InvertBijectorTest(tf.test.TestCase): \"\"\"Tests the correctness of the Y = Invert(bij) transformation.\"\"\"", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "from tensorflow_probability.python.internal import tensorshape_util from tensorflow_probability.python.internal import test_util as tfp_test_util", "__future__ import division from __future__ import print_function import tensorflow.compat.v2 as", "test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes class InvertBijectorTest(tf.test.TestCase): \"\"\"Tests the correctness", "with the License. # You may obtain a copy of", "tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.bijectors import bijector_test_util from", "this file except in compliance with the License. # You", "tensorflow_probability.python.internal import tensorshape_util from tensorflow_probability.python.internal import test_util as tfp_test_util from", "self.assertAllEqual(y, bijector.forward_event_shape(x)) self.assertAllEqual( tensorshape_util.as_list(y), self.evaluate( bijector.forward_event_shape_tensor(tensorshape_util.as_list(x)))) self.assertAllEqual(x, bijector.inverse_event_shape(y)) self.assertAllEqual( tensorshape_util.as_list(x),", "the Apache License, Version 2.0 (the \"License\"); # you may", "self.assertAllEqual( [], self.evaluate( tf.shape( exp_gamma_distribution.sample(seed=tfp_test_util.test_seed())))) if __name__ == \"__main__\": tf.test.main()", "2.], [2., 3.]]] self.assertAllClose( self.evaluate(fwd.inverse(x)), self.evaluate(rev.forward(x))) self.assertAllClose( self.evaluate(fwd.forward(x)), self.evaluate(rev.inverse(x))) self.assertAllClose(", "import absolute_import from __future__ import division from __future__ import print_function", "self.evaluate(rev.inverse(x))) self.assertAllClose( self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1))) self.assertAllClose( self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.forward_log_det_jacobian(x,", "governing permissions and # limitations under the License. # ============================================================================" ]
[ "type icon or not.\"\"\" self._icon_label.setVisible(show_icon) def _set_dayu_text(self): self._content_label.setText(self._dayu_text) self.setVisible(bool(self._dayu_text)) def", "Get MAlert feedback message. :return: six.string_types \"\"\" return self._dayu_text dayu_text", "the close icon button or not.\"\"\" self._close_button.setVisible(closeable) def set_show_icon(self, show_icon):", "\"\"\"Set MAlert to SuccessType\"\"\" self.set_dayu_type(MAlert.SuccessType) return self def warning(self): \"\"\"Set", "= MAvatar() self._icon_label.set_dayu_size(dayu_theme.tiny) self._content_label = MLabel().secondary() self._close_button = MToolButton().svg('close_line.svg').tiny().icon_only() self._close_button.clicked.connect(functools.partial(self.setVisible,", "set_dayu_text(self, value): \"\"\"Set the feedback content.\"\"\" if isinstance(value, six.string_types): self._dayu_text", "_set_dayu_type(self): self._icon_label.set_dayu_image(MPixmap('{}_fill.svg'.format(self._dayu_type), vars(dayu_theme).get(self._dayu_type + '_color'))) self.style().polish(self) def set_dayu_type(self, value): \"\"\"Set", "'success' WarningType = 'warning' ErrorType = 'error' def __init__(self, text='',", "8) self._main_lay.addWidget(self._icon_label) self._main_lay.addWidget(self._content_label) self._main_lay.addStretch() self._main_lay.addWidget(self._close_button) self.setLayout(self._main_lay) self.set_show_icon(True) self.set_closeable(False) self._dayu_type =", "\"\"\" Get MAlert feedback type. :return: str \"\"\" return self._dayu_type", "\"\"\" return self._dayu_type def get_dayu_text(self): \"\"\" Get MAlert feedback message.", "get_dayu_text(self): \"\"\" Get MAlert feedback message. :return: six.string_types \"\"\" return", "if isinstance(value, six.string_types): self._dayu_text = value else: raise TypeError(\"Input argument", "MAlert feedback message. :return: six.string_types \"\"\" return self._dayu_text dayu_text =", "MAlert to InfoType\"\"\" self.set_dayu_type(MAlert.InfoType) return self def success(self): \"\"\"Set MAlert", "MAlert to ErrorType\"\"\" self.set_dayu_type(MAlert.ErrorType) return self def closable(self): \"\"\"Set MAlert", "{}\".format(type(value))) self._set_dayu_text() def _set_dayu_type(self): self._icon_label.set_dayu_image(MPixmap('{}_fill.svg'.format(self._dayu_type), vars(dayu_theme).get(self._dayu_type + '_color'))) self.style().polish(self) def", "error(self): \"\"\"Set MAlert to ErrorType\"\"\" self.set_dayu_type(MAlert.ErrorType) return self def closable(self):", "MAlert feedback type. :return: str \"\"\" return self._dayu_type def get_dayu_text(self):", "\"\"\"Set the feedback content.\"\"\" if isinstance(value, six.string_types): self._dayu_text = value", "for feedback. Property: dayu_type: The feedback type with different color", "Property @property_mixin class MAlert(QWidget): \"\"\" Alert component for feedback. 
Property:", "dayu_widgets import dayu_theme from dayu_widgets.tool_button import MToolButton from dayu_widgets.mixin import", "import dayu_theme from dayu_widgets.tool_button import MToolButton from dayu_widgets.mixin import property_mixin", "self.set_closeable(False) self._dayu_type = None self._dayu_text = None self.set_dayu_type(MAlert.InfoType) self.set_dayu_text(text) def", "= 'error' def __init__(self, text='', parent=None, flags=Qt.Widget): super(MAlert, self).__init__(parent, flags)", "\"\"\"Display the information type icon or not.\"\"\" self._icon_label.setVisible(show_icon) def _set_dayu_text(self):", "get {}\".format(type(value))) self._set_dayu_text() def _set_dayu_type(self): self._icon_label.set_dayu_image(MPixmap('{}_fill.svg'.format(self._dayu_type), vars(dayu_theme).get(self._dayu_type + '_color'))) self.style().polish(self)", "= None self.set_dayu_type(MAlert.InfoType) self.set_dayu_text(text) def set_closeable(self, closeable): \"\"\"Display the close", "value): \"\"\"Set the feedback content.\"\"\" if isinstance(value, six.string_types): self._dayu_text =", "if value in [MAlert.InfoType, MAlert.SuccessType, MAlert.WarningType, MAlert.ErrorType]: self._dayu_type = value", "MAlert.SuccessType, MAlert.WarningType, MAlert.ErrorType]: self._dayu_type = value else: raise ValueError(\"Input argument", "string.\") self._set_dayu_type() def get_dayu_type(self): \"\"\" Get MAlert feedback type. :return:", "get_dayu_type, set_dayu_type) def info(self): \"\"\"Set MAlert to InfoType\"\"\" self.set_dayu_type(MAlert.InfoType) return", "InfoType\"\"\" self.set_dayu_type(MAlert.InfoType) return self def success(self): \"\"\"Set MAlert to SuccessType\"\"\"", "from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property @property_mixin", "flags=Qt.Widget): super(MAlert, self).__init__(parent, flags) self.setAttribute(Qt.WA_StyledBackground) self._icon_label = MAvatar() self._icon_label.set_dayu_size(dayu_theme.tiny) self._content_label", "Alert component for feedback. Property: dayu_type: The feedback type with", "False)) self._main_lay = QHBoxLayout() self._main_lay.setContentsMargins(8, 8, 8, 8) self._main_lay.addWidget(self._icon_label) self._main_lay.addWidget(self._content_label)", "None self.set_dayu_type(MAlert.InfoType) self.set_dayu_text(text) def set_closeable(self, closeable): \"\"\"Display the close icon", "self._close_button.setVisible(closeable) def set_show_icon(self, show_icon): \"\"\"Display the information type icon or", "feedback type with different color container. dayu_text: The feedback string", "container. \"\"\" InfoType = 'info' SuccessType = 'success' WarningType =", "showed in container. \"\"\" InfoType = 'info' SuccessType = 'success'", "import MAvatar from dayu_widgets.label import MLabel from dayu_widgets import dayu_theme", "self._icon_label.setVisible(show_icon) def _set_dayu_text(self): self._content_label.setText(self._dayu_text) self.setVisible(bool(self._dayu_text)) def set_dayu_text(self, value): \"\"\"Set the", "self._icon_label.set_dayu_image(MPixmap('{}_fill.svg'.format(self._dayu_type), vars(dayu_theme).get(self._dayu_type + '_color'))) self.style().polish(self) def set_dayu_type(self, value): \"\"\"Set feedback", "dayu_widgets.label import MLabel from dayu_widgets import dayu_theme from dayu_widgets.tool_button import", "of \" \"info/success/warning/error string.\") self._set_dayu_type() def get_dayu_type(self): \"\"\" Get MAlert", "feedback type. 
:return: str \"\"\" return self._dayu_type def get_dayu_text(self): \"\"\"", "dayu_theme from dayu_widgets.tool_button import MToolButton from dayu_widgets.mixin import property_mixin from", "different color container. dayu_text: The feedback string showed in container.", "from dayu_widgets.label import MLabel from dayu_widgets import dayu_theme from dayu_widgets.tool_button", "TypeError(\"Input argument 'value' should be string type, \" \"but get", "Get MAlert feedback type. :return: str \"\"\" return self._dayu_type def", "\"\"\"Set MAlert to InfoType\"\"\" self.set_dayu_type(MAlert.InfoType) return self def success(self): \"\"\"Set", "\"but get {}\".format(type(value))) self._set_dayu_text() def _set_dayu_type(self): self._icon_label.set_dayu_image(MPixmap('{}_fill.svg'.format(self._dayu_type), vars(dayu_theme).get(self._dayu_type + '_color')))", "MAlert to SuccessType\"\"\" self.set_dayu_type(MAlert.SuccessType) return self def warning(self): \"\"\"Set MAlert", "MToolButton from dayu_widgets.mixin import property_mixin from dayu_widgets.qt import QWidget, QHBoxLayout,", "self.set_dayu_text(text) def set_closeable(self, closeable): \"\"\"Display the close icon button or", "self.set_dayu_type(MAlert.WarningType) return self def error(self): \"\"\"Set MAlert to ErrorType\"\"\" self.set_dayu_type(MAlert.ErrorType)", "to SuccessType\"\"\" self.set_dayu_type(MAlert.SuccessType) return self def warning(self): \"\"\"Set MAlert to", "-*- coding: utf-8 -*- ################################################################### # Author: <NAME> # Date", "closeable): \"\"\"Display the close icon button or not.\"\"\" self._close_button.setVisible(closeable) def", "'warning' ErrorType = 'error' def __init__(self, text='', parent=None, flags=Qt.Widget): super(MAlert,", "Property(six.text_type, get_dayu_text, set_dayu_text) dayu_type = Property(str, get_dayu_type, set_dayu_type) def info(self):", "to InfoType\"\"\" self.set_dayu_type(MAlert.InfoType) return self def success(self): \"\"\"Set MAlert to", "self def warning(self): \"\"\"Set MAlert to WarningType\"\"\" self.set_dayu_type(MAlert.WarningType) return self", "self._dayu_text = value else: raise TypeError(\"Input argument 'value' should be", "flags) self.setAttribute(Qt.WA_StyledBackground) self._icon_label = MAvatar() self._icon_label.set_dayu_size(dayu_theme.tiny) self._content_label = MLabel().secondary() self._close_button", "self._set_dayu_type() def get_dayu_type(self): \"\"\" Get MAlert feedback type. 
:return: str", "self.style().polish(self) def set_dayu_type(self, value): \"\"\"Set feedback type.\"\"\" if value in", "return self def error(self): \"\"\"Set MAlert to ErrorType\"\"\" self.set_dayu_type(MAlert.ErrorType) return", "the feedback content.\"\"\" if isinstance(value, six.string_types): self._dayu_text = value else:", "self._icon_label.set_dayu_size(dayu_theme.tiny) self._content_label = MLabel().secondary() self._close_button = MToolButton().svg('close_line.svg').tiny().icon_only() self._close_button.clicked.connect(functools.partial(self.setVisible, False)) self._main_lay", "self.set_dayu_type(MAlert.ErrorType) return self def closable(self): \"\"\"Set MAlert closebale is True\"\"\"", "get_dayu_text, set_dayu_text) dayu_type = Property(str, get_dayu_type, set_dayu_type) def info(self): \"\"\"Set", "six.string_types): self._dayu_text = value else: raise TypeError(\"Input argument 'value' should", "Author: <NAME> # Date : 2019.2 # Email : <EMAIL>", "from dayu_widgets import dayu_theme from dayu_widgets.tool_button import MToolButton from dayu_widgets.mixin", "type with different color container. dayu_text: The feedback string showed", "dayu_text: The feedback string showed in container. \"\"\" InfoType =", "info(self): \"\"\"Set MAlert to InfoType\"\"\" self.set_dayu_type(MAlert.InfoType) return self def success(self):", "import functools from dayu_widgets.avatar import MAvatar from dayu_widgets.label import MLabel", "return self def success(self): \"\"\"Set MAlert to SuccessType\"\"\" self.set_dayu_type(MAlert.SuccessType) return", "string showed in container. \"\"\" InfoType = 'info' SuccessType =", "success(self): \"\"\"Set MAlert to SuccessType\"\"\" self.set_dayu_type(MAlert.SuccessType) return self def warning(self):", "dayu_widgets.mixin import property_mixin from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt,", "message. :return: six.string_types \"\"\" return self._dayu_text dayu_text = Property(six.text_type, get_dayu_text,", "The feedback string showed in container. \"\"\" InfoType = 'info'", "None self._dayu_text = None self.set_dayu_type(MAlert.InfoType) self.set_dayu_text(text) def set_closeable(self, closeable): \"\"\"Display", "set_show_icon(self, show_icon): \"\"\"Display the information type icon or not.\"\"\" self._icon_label.setVisible(show_icon)", "with different color container. 
dayu_text: The feedback string showed in", "MAlert.ErrorType]: self._dayu_type = value else: raise ValueError(\"Input argument 'value' should", "= None self._dayu_text = None self.set_dayu_type(MAlert.InfoType) self.set_dayu_text(text) def set_closeable(self, closeable):", "self def closable(self): \"\"\"Set MAlert closebale is True\"\"\" self.set_closeable(True) return", "text='', parent=None, flags=Qt.Widget): super(MAlert, self).__init__(parent, flags) self.setAttribute(Qt.WA_StyledBackground) self._icon_label = MAvatar()", "MAvatar() self._icon_label.set_dayu_size(dayu_theme.tiny) self._content_label = MLabel().secondary() self._close_button = MToolButton().svg('close_line.svg').tiny().icon_only() self._close_button.clicked.connect(functools.partial(self.setVisible, False))", "property_mixin from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property", "raise TypeError(\"Input argument 'value' should be string type, \" \"but", "\"\"\" return self._dayu_text dayu_text = Property(six.text_type, get_dayu_text, set_dayu_text) dayu_type =", "feedback type.\"\"\" if value in [MAlert.InfoType, MAlert.SuccessType, MAlert.WarningType, MAlert.ErrorType]: self._dayu_type", "QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property @property_mixin class MAlert(QWidget): \"\"\"", "python # -*- coding: utf-8 -*- ################################################################### # Author: <NAME>", "2019.2 # Email : <EMAIL> ################################################################### \"\"\" MAlert class. \"\"\"", "MAlert.WarningType, MAlert.ErrorType]: self._dayu_type = value else: raise ValueError(\"Input argument 'value'", "return self def closable(self): \"\"\"Set MAlert closebale is True\"\"\" self.set_closeable(True)", "\" \"info/success/warning/error string.\") self._set_dayu_type() def get_dayu_type(self): \"\"\" Get MAlert feedback", "The feedback type with different color container. dayu_text: The feedback", "self._main_lay.addWidget(self._content_label) self._main_lay.addStretch() self._main_lay.addWidget(self._close_button) self.setLayout(self._main_lay) self.set_show_icon(True) self.set_closeable(False) self._dayu_type = None self._dayu_text", "def warning(self): \"\"\"Set MAlert to WarningType\"\"\" self.set_dayu_type(MAlert.WarningType) return self def", "\"\"\" MAlert class. \"\"\" import six import functools from dayu_widgets.avatar", "= MLabel().secondary() self._close_button = MToolButton().svg('close_line.svg').tiny().icon_only() self._close_button.clicked.connect(functools.partial(self.setVisible, False)) self._main_lay = QHBoxLayout()", "dayu_text = Property(six.text_type, get_dayu_text, set_dayu_text) dayu_type = Property(str, get_dayu_type, set_dayu_type)", "-*- ################################################################### # Author: <NAME> # Date : 2019.2 #", "\"\"\" Get MAlert feedback message. :return: six.string_types \"\"\" return self._dayu_text", "super(MAlert, self).__init__(parent, flags) self.setAttribute(Qt.WA_StyledBackground) self._icon_label = MAvatar() self._icon_label.set_dayu_size(dayu_theme.tiny) self._content_label =", "type. :return: str \"\"\" return self._dayu_type def get_dayu_text(self): \"\"\" Get", "raise ValueError(\"Input argument 'value' should be one of \" \"info/success/warning/error", "self def success(self): \"\"\"Set MAlert to SuccessType\"\"\" self.set_dayu_type(MAlert.SuccessType) return self", "value else: raise TypeError(\"Input argument 'value' should be string type,", "MAlert class. 
\"\"\" import six import functools from dayu_widgets.avatar import", "value in [MAlert.InfoType, MAlert.SuccessType, MAlert.WarningType, MAlert.ErrorType]: self._dayu_type = value else:", "import MLabel from dayu_widgets import dayu_theme from dayu_widgets.tool_button import MToolButton", "= MToolButton().svg('close_line.svg').tiny().icon_only() self._close_button.clicked.connect(functools.partial(self.setVisible, False)) self._main_lay = QHBoxLayout() self._main_lay.setContentsMargins(8, 8, 8,", "\"\"\"Display the close icon button or not.\"\"\" self._close_button.setVisible(closeable) def set_show_icon(self,", "self._content_label.setText(self._dayu_text) self.setVisible(bool(self._dayu_text)) def set_dayu_text(self, value): \"\"\"Set the feedback content.\"\"\" if", "def set_closeable(self, closeable): \"\"\"Display the close icon button or not.\"\"\"", "self.setAttribute(Qt.WA_StyledBackground) self._icon_label = MAvatar() self._icon_label.set_dayu_size(dayu_theme.tiny) self._content_label = MLabel().secondary() self._close_button =", "not.\"\"\" self._icon_label.setVisible(show_icon) def _set_dayu_text(self): self._content_label.setText(self._dayu_text) self.setVisible(bool(self._dayu_text)) def set_dayu_text(self, value): \"\"\"Set", "\"\"\" import six import functools from dayu_widgets.avatar import MAvatar from", "the information type icon or not.\"\"\" self._icon_label.setVisible(show_icon) def _set_dayu_text(self): self._content_label.setText(self._dayu_text)", "feedback content.\"\"\" if isinstance(value, six.string_types): self._dayu_text = value else: raise", "MToolButton().svg('close_line.svg').tiny().icon_only() self._close_button.clicked.connect(functools.partial(self.setVisible, False)) self._main_lay = QHBoxLayout() self._main_lay.setContentsMargins(8, 8, 8, 8)", "type, \" \"but get {}\".format(type(value))) self._set_dayu_text() def _set_dayu_type(self): self._icon_label.set_dayu_image(MPixmap('{}_fill.svg'.format(self._dayu_type), vars(dayu_theme).get(self._dayu_type", "= Property(str, get_dayu_type, set_dayu_type) def info(self): \"\"\"Set MAlert to InfoType\"\"\"", "utf-8 -*- ################################################################### # Author: <NAME> # Date : 2019.2", "= 'info' SuccessType = 'success' WarningType = 'warning' ErrorType =", "warning(self): \"\"\"Set MAlert to WarningType\"\"\" self.set_dayu_type(MAlert.WarningType) return self def error(self):", "str \"\"\" return self._dayu_type def get_dayu_text(self): \"\"\" Get MAlert feedback", "# Email : <EMAIL> ################################################################### \"\"\" MAlert class. \"\"\" import", "string type, \" \"but get {}\".format(type(value))) self._set_dayu_text() def _set_dayu_type(self): self._icon_label.set_dayu_image(MPixmap('{}_fill.svg'.format(self._dayu_type),", "color container. dayu_text: The feedback string showed in container. \"\"\"", "#!/usr/bin/env python # -*- coding: utf-8 -*- ################################################################### # Author:", "self._set_dayu_text() def _set_dayu_type(self): self._icon_label.set_dayu_image(MPixmap('{}_fill.svg'.format(self._dayu_type), vars(dayu_theme).get(self._dayu_type + '_color'))) self.style().polish(self) def set_dayu_type(self,", "return self._dayu_type def get_dayu_text(self): \"\"\" Get MAlert feedback message. 
:return:", "dayu_widgets.tool_button import MToolButton from dayu_widgets.mixin import property_mixin from dayu_widgets.qt import", "'value' should be one of \" \"info/success/warning/error string.\") self._set_dayu_type() def", "self._dayu_type def get_dayu_text(self): \"\"\" Get MAlert feedback message. :return: six.string_types", "8, 8) self._main_lay.addWidget(self._icon_label) self._main_lay.addWidget(self._content_label) self._main_lay.addStretch() self._main_lay.addWidget(self._close_button) self.setLayout(self._main_lay) self.set_show_icon(True) self.set_closeable(False) self._dayu_type", "def get_dayu_text(self): \"\"\" Get MAlert feedback message. :return: six.string_types \"\"\"", "self.set_dayu_type(MAlert.SuccessType) return self def warning(self): \"\"\"Set MAlert to WarningType\"\"\" self.set_dayu_type(MAlert.WarningType)", "__init__(self, text='', parent=None, flags=Qt.Widget): super(MAlert, self).__init__(parent, flags) self.setAttribute(Qt.WA_StyledBackground) self._icon_label =", "Email : <EMAIL> ################################################################### \"\"\" MAlert class. \"\"\" import six", "# Author: <NAME> # Date : 2019.2 # Email :", "self.set_dayu_type(MAlert.InfoType) self.set_dayu_text(text) def set_closeable(self, closeable): \"\"\"Display the close icon button", "import MToolButton from dayu_widgets.mixin import property_mixin from dayu_widgets.qt import QWidget,", "or not.\"\"\" self._icon_label.setVisible(show_icon) def _set_dayu_text(self): self._content_label.setText(self._dayu_text) self.setVisible(bool(self._dayu_text)) def set_dayu_text(self, value):", ":return: six.string_types \"\"\" return self._dayu_text dayu_text = Property(six.text_type, get_dayu_text, set_dayu_text)", "def _set_dayu_text(self): self._content_label.setText(self._dayu_text) self.setVisible(bool(self._dayu_text)) def set_dayu_text(self, value): \"\"\"Set the feedback", "isinstance(value, six.string_types): self._dayu_text = value else: raise TypeError(\"Input argument 'value'", "to ErrorType\"\"\" self.set_dayu_type(MAlert.ErrorType) return self def closable(self): \"\"\"Set MAlert closebale", "self._close_button.clicked.connect(functools.partial(self.setVisible, False)) self._main_lay = QHBoxLayout() self._main_lay.setContentsMargins(8, 8, 8, 8) self._main_lay.addWidget(self._icon_label)", "self._dayu_text = None self.set_dayu_type(MAlert.InfoType) self.set_dayu_text(text) def set_closeable(self, closeable): \"\"\"Display the", "self._main_lay.addWidget(self._icon_label) self._main_lay.addWidget(self._content_label) self._main_lay.addStretch() self._main_lay.addWidget(self._close_button) self.setLayout(self._main_lay) self.set_show_icon(True) self.set_closeable(False) self._dayu_type = None", "MAlert to WarningType\"\"\" self.set_dayu_type(MAlert.WarningType) return self def error(self): \"\"\"Set MAlert", "self._content_label = MLabel().secondary() self._close_button = MToolButton().svg('close_line.svg').tiny().icon_only() self._close_button.clicked.connect(functools.partial(self.setVisible, False)) self._main_lay =", "_set_dayu_text(self): self._content_label.setText(self._dayu_text) self.setVisible(bool(self._dayu_text)) def set_dayu_text(self, value): \"\"\"Set the feedback content.\"\"\"", "should be one of \" \"info/success/warning/error string.\") self._set_dayu_type() def get_dayu_type(self):", "self._dayu_type = None self._dayu_text = None self.set_dayu_type(MAlert.InfoType) self.set_dayu_text(text) def set_closeable(self,", "def _set_dayu_type(self): 
self._icon_label.set_dayu_image(MPixmap('{}_fill.svg'.format(self._dayu_type), vars(dayu_theme).get(self._dayu_type + '_color'))) self.style().polish(self) def set_dayu_type(self, value):", "'value' should be string type, \" \"but get {}\".format(type(value))) self._set_dayu_text()", "self def error(self): \"\"\"Set MAlert to ErrorType\"\"\" self.set_dayu_type(MAlert.ErrorType) return self", "<NAME> # Date : 2019.2 # Email : <EMAIL> ###################################################################", "dayu_type: The feedback type with different color container. dayu_text: The", "<EMAIL> ################################################################### \"\"\" MAlert class. \"\"\" import six import functools", "self.set_dayu_type(MAlert.InfoType) return self def success(self): \"\"\"Set MAlert to SuccessType\"\"\" self.set_dayu_type(MAlert.SuccessType)", "def get_dayu_type(self): \"\"\" Get MAlert feedback type. :return: str \"\"\"", "coding: utf-8 -*- ################################################################### # Author: <NAME> # Date :", "\"\"\" InfoType = 'info' SuccessType = 'success' WarningType = 'warning'", "self._main_lay.addStretch() self._main_lay.addWidget(self._close_button) self.setLayout(self._main_lay) self.set_show_icon(True) self.set_closeable(False) self._dayu_type = None self._dayu_text =", "get_dayu_type(self): \"\"\" Get MAlert feedback type. :return: str \"\"\" return", "argument 'value' should be one of \" \"info/success/warning/error string.\") self._set_dayu_type()", "\"\"\"Set MAlert to WarningType\"\"\" self.set_dayu_type(MAlert.WarningType) return self def error(self): \"\"\"Set", "def __init__(self, text='', parent=None, flags=Qt.Widget): super(MAlert, self).__init__(parent, flags) self.setAttribute(Qt.WA_StyledBackground) self._icon_label", "'_color'))) self.style().polish(self) def set_dayu_type(self, value): \"\"\"Set feedback type.\"\"\" if value", "set_dayu_text) dayu_type = Property(str, get_dayu_type, set_dayu_type) def info(self): \"\"\"Set MAlert", "dayu_type = Property(str, get_dayu_type, set_dayu_type) def info(self): \"\"\"Set MAlert to", "self._icon_label = MAvatar() self._icon_label.set_dayu_size(dayu_theme.tiny) self._content_label = MLabel().secondary() self._close_button = MToolButton().svg('close_line.svg').tiny().icon_only()", "self.setVisible(bool(self._dayu_text)) def set_dayu_text(self, value): \"\"\"Set the feedback content.\"\"\" if isinstance(value,", "import six import functools from dayu_widgets.avatar import MAvatar from dayu_widgets.label", "icon or not.\"\"\" self._icon_label.setVisible(show_icon) def _set_dayu_text(self): self._content_label.setText(self._dayu_text) self.setVisible(bool(self._dayu_text)) def set_dayu_text(self,", "MAlert(QWidget): \"\"\" Alert component for feedback. Property: dayu_type: The feedback", "SuccessType\"\"\" self.set_dayu_type(MAlert.SuccessType) return self def warning(self): \"\"\"Set MAlert to WarningType\"\"\"", "button or not.\"\"\" self._close_button.setVisible(closeable) def set_show_icon(self, show_icon): \"\"\"Display the information", "close icon button or not.\"\"\" self._close_button.setVisible(closeable) def set_show_icon(self, show_icon): \"\"\"Display", "self._dayu_text dayu_text = Property(six.text_type, get_dayu_text, set_dayu_text) dayu_type = Property(str, get_dayu_type,", "################################################################### \"\"\" MAlert class. 
\"\"\" import six import functools from", "functools from dayu_widgets.avatar import MAvatar from dayu_widgets.label import MLabel from", "\"\"\" Alert component for feedback. Property: dayu_type: The feedback type", "= QHBoxLayout() self._main_lay.setContentsMargins(8, 8, 8, 8) self._main_lay.addWidget(self._icon_label) self._main_lay.addWidget(self._content_label) self._main_lay.addStretch() self._main_lay.addWidget(self._close_button)", "MIcon, Property @property_mixin class MAlert(QWidget): \"\"\" Alert component for feedback.", "one of \" \"info/success/warning/error string.\") self._set_dayu_type() def get_dayu_type(self): \"\"\" Get", "self._close_button = MToolButton().svg('close_line.svg').tiny().icon_only() self._close_button.clicked.connect(functools.partial(self.setVisible, False)) self._main_lay = QHBoxLayout() self._main_lay.setContentsMargins(8, 8,", "ErrorType = 'error' def __init__(self, text='', parent=None, flags=Qt.Widget): super(MAlert, self).__init__(parent,", "import property_mixin from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon,", "\" \"but get {}\".format(type(value))) self._set_dayu_text() def _set_dayu_type(self): self._icon_label.set_dayu_image(MPixmap('{}_fill.svg'.format(self._dayu_type), vars(dayu_theme).get(self._dayu_type +", ":return: str \"\"\" return self._dayu_type def get_dayu_text(self): \"\"\" Get MAlert", "MLabel from dayu_widgets import dayu_theme from dayu_widgets.tool_button import MToolButton from", "# -*- coding: utf-8 -*- ################################################################### # Author: <NAME> #", "information type icon or not.\"\"\" self._icon_label.setVisible(show_icon) def _set_dayu_text(self): self._content_label.setText(self._dayu_text) self.setVisible(bool(self._dayu_text))", "[MAlert.InfoType, MAlert.SuccessType, MAlert.WarningType, MAlert.ErrorType]: self._dayu_type = value else: raise ValueError(\"Input", "= 'warning' ErrorType = 'error' def __init__(self, text='', parent=None, flags=Qt.Widget):", "Property(str, get_dayu_type, set_dayu_type) def info(self): \"\"\"Set MAlert to InfoType\"\"\" self.set_dayu_type(MAlert.InfoType)", "# Date : 2019.2 # Email : <EMAIL> ################################################################### \"\"\"", "= value else: raise ValueError(\"Input argument 'value' should be one", "SuccessType = 'success' WarningType = 'warning' ErrorType = 'error' def", "icon button or not.\"\"\" self._close_button.setVisible(closeable) def set_show_icon(self, show_icon): \"\"\"Display the", "\"\"\"Set feedback type.\"\"\" if value in [MAlert.InfoType, MAlert.SuccessType, MAlert.WarningType, MAlert.ErrorType]:", "value else: raise ValueError(\"Input argument 'value' should be one of", "class. \"\"\" import six import functools from dayu_widgets.avatar import MAvatar", "QHBoxLayout() self._main_lay.setContentsMargins(8, 8, 8, 8) self._main_lay.addWidget(self._icon_label) self._main_lay.addWidget(self._content_label) self._main_lay.addStretch() self._main_lay.addWidget(self._close_button) self.setLayout(self._main_lay)", "set_dayu_type) def info(self): \"\"\"Set MAlert to InfoType\"\"\" self.set_dayu_type(MAlert.InfoType) return self", "def set_dayu_text(self, value): \"\"\"Set the feedback content.\"\"\" if isinstance(value, six.string_types):", "component for feedback. 
Property: dayu_type: The feedback type with different", "InfoType = 'info' SuccessType = 'success' WarningType = 'warning' ErrorType", "self._main_lay = QHBoxLayout() self._main_lay.setContentsMargins(8, 8, 8, 8) self._main_lay.addWidget(self._icon_label) self._main_lay.addWidget(self._content_label) self._main_lay.addStretch()", "def error(self): \"\"\"Set MAlert to ErrorType\"\"\" self.set_dayu_type(MAlert.ErrorType) return self def", "in container. \"\"\" InfoType = 'info' SuccessType = 'success' WarningType", "be string type, \" \"but get {}\".format(type(value))) self._set_dayu_text() def _set_dayu_type(self):", "def set_show_icon(self, show_icon): \"\"\"Display the information type icon or not.\"\"\"", "value): \"\"\"Set feedback type.\"\"\" if value in [MAlert.InfoType, MAlert.SuccessType, MAlert.WarningType,", "in [MAlert.InfoType, MAlert.SuccessType, MAlert.WarningType, MAlert.ErrorType]: self._dayu_type = value else: raise", "WarningType\"\"\" self.set_dayu_type(MAlert.WarningType) return self def error(self): \"\"\"Set MAlert to ErrorType\"\"\"", "QHBoxLayout, MPixmap, Qt, MIcon, Property @property_mixin class MAlert(QWidget): \"\"\" Alert", "\"\"\"Set MAlert to ErrorType\"\"\" self.set_dayu_type(MAlert.ErrorType) return self def closable(self): \"\"\"Set", "from dayu_widgets.tool_button import MToolButton from dayu_widgets.mixin import property_mixin from dayu_widgets.qt", "################################################################### # Author: <NAME> # Date : 2019.2 # Email", "= value else: raise TypeError(\"Input argument 'value' should be string", "be one of \" \"info/success/warning/error string.\") self._set_dayu_type() def get_dayu_type(self): \"\"\"", "self._dayu_type = value else: raise ValueError(\"Input argument 'value' should be", "argument 'value' should be string type, \" \"but get {}\".format(type(value)))", "8, 8, 8) self._main_lay.addWidget(self._icon_label) self._main_lay.addWidget(self._content_label) self._main_lay.addStretch() self._main_lay.addWidget(self._close_button) self.setLayout(self._main_lay) self.set_show_icon(True) self.set_closeable(False)", "set_closeable(self, closeable): \"\"\"Display the close icon button or not.\"\"\" self._close_button.setVisible(closeable)", ": <EMAIL> ################################################################### \"\"\" MAlert class. 
\"\"\" import six import", "set_dayu_type(self, value): \"\"\"Set feedback type.\"\"\" if value in [MAlert.InfoType, MAlert.SuccessType,", "MAvatar from dayu_widgets.label import MLabel from dayu_widgets import dayu_theme from", "vars(dayu_theme).get(self._dayu_type + '_color'))) self.style().polish(self) def set_dayu_type(self, value): \"\"\"Set feedback type.\"\"\"", "Date : 2019.2 # Email : <EMAIL> ################################################################### \"\"\" MAlert", "from dayu_widgets.mixin import property_mixin from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap,", "to WarningType\"\"\" self.set_dayu_type(MAlert.WarningType) return self def error(self): \"\"\"Set MAlert to", "ErrorType\"\"\" self.set_dayu_type(MAlert.ErrorType) return self def closable(self): \"\"\"Set MAlert closebale is", "show_icon): \"\"\"Display the information type icon or not.\"\"\" self._icon_label.setVisible(show_icon) def", "self.setLayout(self._main_lay) self.set_show_icon(True) self.set_closeable(False) self._dayu_type = None self._dayu_text = None self.set_dayu_type(MAlert.InfoType)", "self).__init__(parent, flags) self.setAttribute(Qt.WA_StyledBackground) self._icon_label = MAvatar() self._icon_label.set_dayu_size(dayu_theme.tiny) self._content_label = MLabel().secondary()", "self._main_lay.setContentsMargins(8, 8, 8, 8) self._main_lay.addWidget(self._icon_label) self._main_lay.addWidget(self._content_label) self._main_lay.addStretch() self._main_lay.addWidget(self._close_button) self.setLayout(self._main_lay) self.set_show_icon(True)", "should be string type, \" \"but get {}\".format(type(value))) self._set_dayu_text() def", "= 'success' WarningType = 'warning' ErrorType = 'error' def __init__(self,", "feedback. Property: dayu_type: The feedback type with different color container.", "def closable(self): \"\"\"Set MAlert closebale is True\"\"\" self.set_closeable(True) return self", "container. dayu_text: The feedback string showed in container. 
\"\"\" InfoType", "self.set_show_icon(True) self.set_closeable(False) self._dayu_type = None self._dayu_text = None self.set_dayu_type(MAlert.InfoType) self.set_dayu_text(text)", "or not.\"\"\" self._close_button.setVisible(closeable) def set_show_icon(self, show_icon): \"\"\"Display the information type", "WarningType = 'warning' ErrorType = 'error' def __init__(self, text='', parent=None,", "self._main_lay.addWidget(self._close_button) self.setLayout(self._main_lay) self.set_show_icon(True) self.set_closeable(False) self._dayu_type = None self._dayu_text = None", ": 2019.2 # Email : <EMAIL> ################################################################### \"\"\" MAlert class.", "Qt, MIcon, Property @property_mixin class MAlert(QWidget): \"\"\" Alert component for", "else: raise TypeError(\"Input argument 'value' should be string type, \"", "def info(self): \"\"\"Set MAlert to InfoType\"\"\" self.set_dayu_type(MAlert.InfoType) return self def", "'info' SuccessType = 'success' WarningType = 'warning' ErrorType = 'error'", "return self def warning(self): \"\"\"Set MAlert to WarningType\"\"\" self.set_dayu_type(MAlert.WarningType) return", "from dayu_widgets.avatar import MAvatar from dayu_widgets.label import MLabel from dayu_widgets", "dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property @property_mixin class", "+ '_color'))) self.style().polish(self) def set_dayu_type(self, value): \"\"\"Set feedback type.\"\"\" if", "import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property @property_mixin class MAlert(QWidget):", "\"info/success/warning/error string.\") self._set_dayu_type() def get_dayu_type(self): \"\"\" Get MAlert feedback type.", "return self._dayu_text dayu_text = Property(six.text_type, get_dayu_text, set_dayu_text) dayu_type = Property(str,", "feedback string showed in container. \"\"\" InfoType = 'info' SuccessType", "else: raise ValueError(\"Input argument 'value' should be one of \"", "def success(self): \"\"\"Set MAlert to SuccessType\"\"\" self.set_dayu_type(MAlert.SuccessType) return self def", "type.\"\"\" if value in [MAlert.InfoType, MAlert.SuccessType, MAlert.WarningType, MAlert.ErrorType]: self._dayu_type =", "ValueError(\"Input argument 'value' should be one of \" \"info/success/warning/error string.\")", "Property: dayu_type: The feedback type with different color container. dayu_text:", "feedback message. :return: six.string_types \"\"\" return self._dayu_text dayu_text = Property(six.text_type,", "parent=None, flags=Qt.Widget): super(MAlert, self).__init__(parent, flags) self.setAttribute(Qt.WA_StyledBackground) self._icon_label = MAvatar() self._icon_label.set_dayu_size(dayu_theme.tiny)", "= Property(six.text_type, get_dayu_text, set_dayu_text) dayu_type = Property(str, get_dayu_type, set_dayu_type) def", "content.\"\"\" if isinstance(value, six.string_types): self._dayu_text = value else: raise TypeError(\"Input", "class MAlert(QWidget): \"\"\" Alert component for feedback. Property: dayu_type: The", "not.\"\"\" self._close_button.setVisible(closeable) def set_show_icon(self, show_icon): \"\"\"Display the information type icon", "@property_mixin class MAlert(QWidget): \"\"\" Alert component for feedback. 
Property: dayu_type:", "def set_dayu_type(self, value): \"\"\"Set feedback type.\"\"\" if value in [MAlert.InfoType,", "MPixmap, Qt, MIcon, Property @property_mixin class MAlert(QWidget): \"\"\" Alert component", "six.string_types \"\"\" return self._dayu_text dayu_text = Property(six.text_type, get_dayu_text, set_dayu_text) dayu_type", "'error' def __init__(self, text='', parent=None, flags=Qt.Widget): super(MAlert, self).__init__(parent, flags) self.setAttribute(Qt.WA_StyledBackground)", "MLabel().secondary() self._close_button = MToolButton().svg('close_line.svg').tiny().icon_only() self._close_button.clicked.connect(functools.partial(self.setVisible, False)) self._main_lay = QHBoxLayout() self._main_lay.setContentsMargins(8,", "dayu_widgets.avatar import MAvatar from dayu_widgets.label import MLabel from dayu_widgets import", "six import functools from dayu_widgets.avatar import MAvatar from dayu_widgets.label import" ]
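A minimal usage sketch for the class above. This is illustrative, not from the source: it assumes dayu_widgets is installed, that the module is importable as dayu_widgets.alert, and that the Qt.py shim (a dayu_widgets dependency) provides the application object; adjust the second import to your Qt binding if needed.

# Illustrative only: shows the chainable helpers (success()/closable()) that
# each return self, so type and closeability can be set in one expression.
from dayu_widgets.alert import MAlert  # assumption: MAlert lives in dayu_widgets.alert
from Qt import QtWidgets  # assumption: using the Qt.py shim

app = QtWidgets.QApplication([])
alert = MAlert(text='File uploaded successfully.').success().closable()
alert.show()
app.exec_()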
[ "<gh_stars>0 input_str = input(\"문자열을 입력해 주세요. >> \") print(\"입력받은 문자열의", "input(\"문자열을 입력해 주세요. >> \") print(\"입력받은 문자열의 길이는\", len(input_str), \"입니다.\")", "= input(\"문자열을 입력해 주세요. >> \") print(\"입력받은 문자열의 길이는\", len(input_str),", "input_str = input(\"문자열을 입력해 주세요. >> \") print(\"입력받은 문자열의 길이는\"," ]
[ "END_DATE, STATUS_CODE, PARTICIPANT_LEVEL_CODE, SALES_REP_TYPE_CODE, CURRENT_RECORD_FLAG, LAST_HIRE_DATE) SELECT B.WW_DIRECT_GEO_DESCRIPTION AS SALES_GEOGRAPHY,", "C ON C.BK_EMPLOYEE_ID = A.EMPLOYEE_ID WHERE RANK = 1\"\"\" return", "'0' dest_count = '0' # start Spark application and get", "SALES_GEOGRAPHY, B.MULTI_AREA_DESCRIPTION AS SALES_MULTI_AREA, B.AREA_DESCRIPTION AS SALES_AREA, B.MULTI_REGION_DESCRIPTION AS SALES_MULTI_REGION,", "--> JB_SALES_HIERARCHY_FLAG_N_SR.py #************************************************************************************************************** # # Created by : bibin #", "return query # Main method def main(): try: src_count =", "EbiReadWrite import logging import sys from time import gmtime, strftime", "+\"\"\".SALES_HIERARCHY (SALES_GEOGRAPHY, SALES_MULTI_AREA, SALES_AREA, SALES_MULTI_REGION, SALES_REGION, SALES_DISTRICT, SALES_TEAM, EMPLOYEE_ID, SALES_REP_NUMBER,", "= A.EMPLOYEE_ID WHERE RANK = 1\"\"\" return query # Main", "gmtime())+\"'\" data_format = \"JOB START DT : \"+start_date+\" | SCRIPT", "import logging import sys from time import gmtime, strftime import", "\"JOB START DT : \"+start_date+\" | SCRIPT NAME : \"+script_name+\"", "Spark application and get Spark session, logger and config spark,", "Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"Success\") Ebi_read_write_obj.job_debugger_print(\" \\n __main__ \" + app_name +\" -->", "\"+end_date+\" | STATUS : %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"[Error] Failed\") Ebi_read_write_obj.job_debugger_print(\" \\n", "in spark log or console end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" data_format =", "Entry point for script if __name__ == \"__main__\": # Calling", "AS SALES_REP_NAME, A.ORGANIZATION_NAME AS SALES_REP_ORG, A.COMP_PLAN_TYPE_CODE, A.COMP_PLAN_TITLE, A.COMP_PLAN_CATEGORY_CODE, A.COMP_PLAN_DESCRIPTION, NULL", "A.COMP_PLAN_CATEGORY_CODE, A.COMP_PLAN_DESCRIPTION, NULL AS GOAL_CURR_CODE , A.START_DATE, A.END_DATE, A.STATUS_CODE, A.PARTICIPANT_LEVEL_CODE,", "SALES_MULTI_AREA, B.AREA_DESCRIPTION AS SALES_AREA, B.MULTI_REGION_DESCRIPTION AS SALES_MULTI_REGION, SUBSTR(B.REGION_DESCRIPTION,1,50) AS SALES_REGION,", "Main method def main(): try: src_count = '0' dest_count =", "%H:%M:%S\", gmtime())+\"'\" data_format = \"JOB START DT : \"+start_date+\" |", "DT : \"+end_date+\" | STATUS : %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"Success\") Ebi_read_write_obj.job_debugger_print(\"", "# Importing required Lib from dependencies.spark import start_spark from dependencies.EbiReadWrite", "+\" --> Job \"+app_name+\" Succeed \\n\") except Exception as err:", ": bibin # Version : 1.0 # # Description :", "A.COMP_PLAN_DESCRIPTION, NULL AS GOAL_CURR_CODE , A.START_DATE, A.END_DATE, A.STATUS_CODE, A.PARTICIPANT_LEVEL_CODE, SUBSTR(A.SALES_REP_TYPE_CODE,1,5)", "Query query = query_data(db_schema) # Calling Job Class method -->", "(YYYY-MM-DD) Change Description # ----------------- ------------------ # 2018-11-02 Initial creation", "Create class Object Ebi_read_write_obj = EbiReadWrite(app_name,spark,config,logger) # DB prop Key", "\"JB_SALES_HIERARCHY_FLAG_N_SR\" log_filename = app_name + '_' + log_date + '.log'", "B.WW_DIRECT_GEO_DESCRIPTION AS SALES_GEOGRAPHY, B.MULTI_AREA_DESCRIPTION AS SALES_MULTI_AREA, B.AREA_DESCRIPTION AS SALES_AREA, B.MULTI_REGION_DESCRIPTION", "\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" log_date =strftime(\"%Y%m%d\", gmtime()) # Job Naming 
Details", "start Spark application and get Spark session, logger and config", "config['DB_PROP_KEY_EXTRACT'] db_schema = config['DB_SCHEMA'] log_file = config['LOG_DIR_NAME'] + \"/\" +", "A.EMPLOYEE_ID WHERE RANK = 1\"\"\" return query # Main method", "= config['DB_SCHEMA'] log_file = config['LOG_DIR_NAME'] + \"/\" + log_filename #SQL", "log or console end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" data_format = \"JOB START", "BY END_DATE desc) as RANK FROM DIMS.SALES_PARTICIPANT a WHERE BK_SALES_REP_NUMBER", "DB prop Key of Source DB db_prop_key_load = config['DB_PROP_KEY_LOAD'] db_prop_key_extract", "get Spark session, logger and config spark, config = start_spark(", "+ log_filename #SQL Query query = query_data(db_schema) # Calling Job", "\"+ app_name +\" --> Exception-Traceback :: \" + str(err)) raise", "gmtime, strftime import cx_Oracle import py4j import pyspark # Spark", "Ebi_read_write_obj.job_debugger_print(\" \\n Job \"+app_name+\" Failed\\n\") logger.error(\"\\n __main__ \"+ app_name +\"", "# 2018-11-02 Initial creation # #************************************************************************************************************** # Importing required Lib", "= \"JOB START DT : \"+start_date+\" | SCRIPT NAME :", "log_filename #SQL Query query = query_data(db_schema) # Calling Job Class", "start_date = \"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" log_date =strftime(\"%Y%m%d\", gmtime()) # Job", "--> Exception-Traceback :: \" + str(err)) raise # Entry point", "spark, config = start_spark( app_name=app_name) # Create class Object Ebi_read_write_obj", "data into 'SALES_HIERARCHY' table based on stream lookups. # #", "logger = logging.getLogger(__name__) # Date Formats start_date = \"'\"+strftime(\"%Y-%m-%d %H:%M:%S\",", "SALES_DISTRICT, SALES_TEAM, EMPLOYEE_ID, SALES_REP_NUMBER, LOGIN_ID, SALES_REP_NAME, SALES_REP_ORG, COMP_PLAN_TYPE_CODE, COMP_PLAN_TITLE, COMP_PLAN_CATEGORY_CODE,", "SALES_REGION, SALES_DISTRICT, SALES_TEAM, EMPLOYEE_ID, SALES_REP_NUMBER, LOGIN_ID, SALES_REP_NAME, SALES_REP_ORG, COMP_PLAN_TYPE_CODE, COMP_PLAN_TITLE,", "= 'Y') AND PARTICIPANT_LEVEL_CODE = 'SR' ORDER BY BK_SALES_REP_NUMBER,SALES_PARTICIPANT_KEY )", "a.*,ROW_NUMBER() over (partition by BK_SALES_REP_NUMBER ORDER BY END_DATE desc) as", "IN (SELECT DISTINCT BK_SALES_REP_NUMBER FROM DIMS.SALES_PARTICIPANT WHERE CURRENT_RECORD_FLAG = 'Y')", "= 'SR' ORDER BY BK_SALES_REP_NUMBER,SALES_PARTICIPANT_KEY ) A INNER JOIN DIMS.SALES_TERR_HIERAR_AS_IS_MV", "Date (YYYY-MM-DD) Change Description # ----------------- ------------------ # 2018-11-02 Initial", "A.BK_SALES_REP_NUMBER AS SALES_REP_NUMBER, SUBSTR(A.EMP_SYS_LOGIN_ID,1,10) AS LOGIN_ID, SUBSTR(A.SALES_REP_NAME,1,50) AS SALES_REP_NAME, A.ORGANIZATION_NAME", "\"+app_name+\" | SRC COUNT : \"+src_count+\" | TGT COUNT :", "console end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" data_format = \"JOB START DT :", "GOAL_CURR_CODE, START_DATE, END_DATE, STATUS_CODE, PARTICIPANT_LEVEL_CODE, SALES_REP_TYPE_CODE, CURRENT_RECORD_FLAG, LAST_HIRE_DATE) SELECT B.WW_DIRECT_GEO_DESCRIPTION", "SUBSTR(A.EMP_SYS_LOGIN_ID,1,10) AS LOGIN_ID, SUBSTR(A.SALES_REP_NAME,1,50) AS SALES_REP_NAME, A.ORGANIZATION_NAME AS SALES_REP_ORG, A.COMP_PLAN_TYPE_CODE,", "BK_EMPLOYEE_ID,RECENT_HIRE_DATE FROM DIMS.WORKER_DETAIL WHERE CURRENT_RECORD_IND = 1 ) C ON", "A.START_DATE, A.END_DATE, A.STATUS_CODE, A.PARTICIPANT_LEVEL_CODE, SUBSTR(A.SALES_REP_TYPE_CODE,1,5) AS SALES_REP_TYPE_CODE, A.CURRENT_RECORD_FLAG, C.RECENT_HIRE_DATE AS", 
"#************************************************************************************************************** # # Created by : bibin # Version :", "BK_SALES_REP_NUMBER FROM DIMS.SALES_PARTICIPANT WHERE CURRENT_RECORD_FLAG = 'Y') AND PARTICIPANT_LEVEL_CODE =", "AS LAST_HIRE_DATE FROM ( SELECT a.*,ROW_NUMBER() over (partition by BK_SALES_REP_NUMBER", "\"/\" + log_filename #SQL Query query = query_data(db_schema) # Calling", "logger and config spark, config = start_spark( app_name=app_name) # Create", "OUTER JOIN (SELECT LTRIM(BK_EMPLOYEE_ID,'0') BK_EMPLOYEE_ID,RECENT_HIRE_DATE FROM DIMS.WORKER_DETAIL WHERE CURRENT_RECORD_IND =", "This script will load the data into 'SALES_HIERARCHY' table based", "STATUS : %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"[Error] Failed\") Ebi_read_write_obj.job_debugger_print(\" \\n Job \"+app_name+\"", "Initial creation # #************************************************************************************************************** # Importing required Lib from dependencies.spark", "------------------ # 2018-11-02 Initial creation # #************************************************************************************************************** # Importing required", "and get Spark session, logger and config spark, config =", "Initial Creation: # # Date (YYYY-MM-DD) Change Description # -----------------", "SALES_REP_ORG, A.COMP_PLAN_TYPE_CODE, A.COMP_PLAN_TITLE, A.COMP_PLAN_CATEGORY_CODE, A.COMP_PLAN_DESCRIPTION, NULL AS GOAL_CURR_CODE , A.START_DATE,", "\\n __main__ \" + app_name +\" --> Job \"+app_name+\" Succeed", "except Exception as err: # Write expeption in spark log", "DIMS.SALES_PARTICIPANT WHERE CURRENT_RECORD_FLAG = 'Y') AND PARTICIPANT_LEVEL_CODE = 'SR' ORDER", "\"+dest_count+\" | JOB END DT : \"+end_date+\" | STATUS :", "AS SALES_MULTI_REGION, SUBSTR(B.REGION_DESCRIPTION,1,50) AS SALES_REGION, SUBSTR(B.DISTRICT_DESCRIPTION,1,50) AS SALES_DISTRICT, SUBSTR(B.TEAM_DESCRIPTION,1,50) AS", "= \"\"\"INSERT INTO \"\"\"+ db_schema +\"\"\".SALES_HIERARCHY (SALES_GEOGRAPHY, SALES_MULTI_AREA, SALES_AREA, SALES_MULTI_REGION,", "(partition by BK_SALES_REP_NUMBER ORDER BY END_DATE desc) as RANK FROM", "%H:%M:%S\", gmtime())+\"'\" log_date =strftime(\"%Y%m%d\", gmtime()) # Job Naming Details script_name", "WHERE CURRENT_RECORD_FLAG = 'Y') AND PARTICIPANT_LEVEL_CODE = 'SR' ORDER BY", "Date Formats start_date = \"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" log_date =strftime(\"%Y%m%d\", gmtime())", "COMP_PLAN_DESCRIPTION, GOAL_CURR_CODE, START_DATE, END_DATE, STATUS_CODE, PARTICIPANT_LEVEL_CODE, SALES_REP_TYPE_CODE, CURRENT_RECORD_FLAG, LAST_HIRE_DATE) SELECT", "\\n Job \"+app_name+\" Failed\\n\") logger.error(\"\\n __main__ \"+ app_name +\" -->", "+ log_date + '.log' # Query for loading invoice table", "AS SALES_AREA, B.MULTI_REGION_DESCRIPTION AS SALES_MULTI_REGION, SUBSTR(B.REGION_DESCRIPTION,1,50) AS SALES_REGION, SUBSTR(B.DISTRICT_DESCRIPTION,1,50) AS", ") A INNER JOIN DIMS.SALES_TERR_HIERAR_AS_IS_MV B ON B.TERRITORY_KEY = A.TERRITORY_KEY", "# # # Initial Creation: # # Date (YYYY-MM-DD) Change", "1 ) C ON C.BK_EMPLOYEE_ID = A.EMPLOYEE_ID WHERE RANK =", "AS SALES_REP_TYPE_CODE, A.CURRENT_RECORD_FLAG, C.RECENT_HIRE_DATE AS LAST_HIRE_DATE FROM ( SELECT a.*,ROW_NUMBER()", "ORDER BY BK_SALES_REP_NUMBER,SALES_PARTICIPANT_KEY ) A INNER JOIN DIMS.SALES_TERR_HIERAR_AS_IS_MV B ON", "B ON B.TERRITORY_KEY = A.TERRITORY_KEY LEFT OUTER JOIN (SELECT LTRIM(BK_EMPLOYEE_ID,'0')", "raise # Entry point for script if __name__ == 
\"__main__\":", "config spark, config = start_spark( app_name=app_name) # Create class Object", "B.MULTI_AREA_DESCRIPTION AS SALES_MULTI_AREA, B.AREA_DESCRIPTION AS SALES_AREA, B.MULTI_REGION_DESCRIPTION AS SALES_MULTI_REGION, SUBSTR(B.REGION_DESCRIPTION,1,50)", "Change Description # ----------------- ------------------ # 2018-11-02 Initial creation #", "CURRENT_RECORD_IND = 1 ) C ON C.BK_EMPLOYEE_ID = A.EMPLOYEE_ID WHERE", "AS SALES_REP_NUMBER, SUBSTR(A.EMP_SYS_LOGIN_ID,1,10) AS LOGIN_ID, SUBSTR(A.SALES_REP_NAME,1,50) AS SALES_REP_NAME, A.ORGANIZATION_NAME AS", "+ str(err)) raise # Entry point for script if __name__", "# SCH1101.sh --> JB_SALES_HIERARCHY_FLAG_N_SR.py #************************************************************************************************************** # # Created by :", "# Description : # 1. This script will load the", "START DT : \"+start_date+\" | SCRIPT NAME : \"+script_name+\" |", "\"+src_count+\" | TGT COUNT : \"+dest_count+\" | JOB END DT", "# Date Formats start_date = \"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" log_date =strftime(\"%Y%m%d\",", "| JOB END DT : \"+end_date+\" | STATUS : %(message)s\"", "# Initial Creation: # # Date (YYYY-MM-DD) Change Description #", "session, logger and config spark, config = start_spark( app_name=app_name) #", "| SRC COUNT : \"+src_count+\" | TGT COUNT : \"+dest_count+\"", "SRC COUNT : \"+src_count+\" | TGT COUNT : \"+dest_count+\" |", "logging import sys from time import gmtime, strftime import cx_Oracle", "# DB prop Key of Source DB db_prop_key_load = config['DB_PROP_KEY_LOAD']", "INNER JOIN DIMS.SALES_TERR_HIERAR_AS_IS_MV B ON B.TERRITORY_KEY = A.TERRITORY_KEY LEFT OUTER", "required Lib from dependencies.spark import start_spark from dependencies.EbiReadWrite import EbiReadWrite", "load the data into 'SALES_HIERARCHY' table based on stream lookups.", "db_prop_key_load = config['DB_PROP_KEY_LOAD'] db_prop_key_extract = config['DB_PROP_KEY_EXTRACT'] db_schema = config['DB_SCHEMA'] log_file", "A.TERRITORY_KEY LEFT OUTER JOIN (SELECT LTRIM(BK_EMPLOYEE_ID,'0') BK_EMPLOYEE_ID,RECENT_HIRE_DATE FROM DIMS.WORKER_DETAIL WHERE", "start_spark( app_name=app_name) # Create class Object Ebi_read_write_obj = EbiReadWrite(app_name,spark,config,logger) #", "log_filename = app_name + '_' + log_date + '.log' #", "# Entry point for script if __name__ == \"__main__\": #", "\"+app_name+\" Failed\\n\") logger.error(\"\\n __main__ \"+ app_name +\" --> Exception-Traceback ::", "AS GOAL_CURR_CODE , A.START_DATE, A.END_DATE, A.STATUS_CODE, A.PARTICIPANT_LEVEL_CODE, SUBSTR(A.SALES_REP_TYPE_CODE,1,5) AS SALES_REP_TYPE_CODE,", "class Object Ebi_read_write_obj = EbiReadWrite(app_name,spark,config,logger) # DB prop Key of", "COUNT : \"+dest_count+\" | JOB END DT : \"+end_date+\" |", "SALES_DISTRICT, SUBSTR(B.TEAM_DESCRIPTION,1,50) AS SALES_TEAM, A.EMPLOYEE_ID, A.BK_SALES_REP_NUMBER AS SALES_REP_NUMBER, SUBSTR(A.EMP_SYS_LOGIN_ID,1,10) AS", "Object Ebi_read_write_obj = EbiReadWrite(app_name,spark,config,logger) # DB prop Key of Source", "Failed\") Ebi_read_write_obj.job_debugger_print(\" \\n Job \"+app_name+\" Failed\\n\") logger.error(\"\\n __main__ \"+ app_name", "over (partition by BK_SALES_REP_NUMBER ORDER BY END_DATE desc) as RANK", "JOB : \"+app_name+\" | SRC COUNT : \"+src_count+\" | TGT", "db_prop_key_extract = config['DB_PROP_KEY_EXTRACT'] db_schema = config['DB_SCHEMA'] log_file = config['LOG_DIR_NAME'] +", "INTO \"\"\"+ db_schema +\"\"\".SALES_HIERARCHY (SALES_GEOGRAPHY, SALES_MULTI_AREA, SALES_AREA, SALES_MULTI_REGION, SALES_REGION, 
SALES_DISTRICT,", "the data into 'SALES_HIERARCHY' table based on stream lookups. #", "# Spark logging logger = logging.getLogger(__name__) # Date Formats start_date", "EbiReadWrite(app_name,spark,config,logger) # DB prop Key of Source DB db_prop_key_load =", ") C ON C.BK_EMPLOYEE_ID = A.EMPLOYEE_ID WHERE RANK = 1\"\"\"", "A.EMPLOYEE_ID, A.BK_SALES_REP_NUMBER AS SALES_REP_NUMBER, SUBSTR(A.EMP_SYS_LOGIN_ID,1,10) AS LOGIN_ID, SUBSTR(A.SALES_REP_NAME,1,50) AS SALES_REP_NAME,", "# ----------------- ------------------ # 2018-11-02 Initial creation # #************************************************************************************************************** #", "Creation: # # Date (YYYY-MM-DD) Change Description # ----------------- ------------------", "log_date =strftime(\"%Y%m%d\", gmtime()) # Job Naming Details script_name = \"SCH1101.SH\"", "END DT : \"+end_date+\" | STATUS : %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"Success\")", "RANK FROM DIMS.SALES_PARTICIPANT a WHERE BK_SALES_REP_NUMBER NOT IN (SELECT DISTINCT", "WHERE CURRENT_RECORD_IND = 1 ) C ON C.BK_EMPLOYEE_ID = A.EMPLOYEE_ID", "Ebi_read_write_obj.get_target_data_update(query,db_prop_key_load) end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" data_format = \"JOB START DT :", "# # Description : # 1. This script will load", ": \"+dest_count+\" | JOB END DT : \"+end_date+\" | STATUS", "Ebi_read_write_obj = EbiReadWrite(app_name,spark,config,logger) # DB prop Key of Source DB", "'SR' ORDER BY BK_SALES_REP_NUMBER,SALES_PARTICIPANT_KEY ) A INNER JOIN DIMS.SALES_TERR_HIERAR_AS_IS_MV B", "GOAL_CURR_CODE , A.START_DATE, A.END_DATE, A.STATUS_CODE, A.PARTICIPANT_LEVEL_CODE, SUBSTR(A.SALES_REP_TYPE_CODE,1,5) AS SALES_REP_TYPE_CODE, A.CURRENT_RECORD_FLAG,", "Formats start_date = \"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" log_date =strftime(\"%Y%m%d\", gmtime()) #", "2018-11-02 Initial creation # #************************************************************************************************************** # Importing required Lib from", "# # Initial Creation: # # Date (YYYY-MM-DD) Change Description", "method --> get_target_data_update() Ebi_read_write_obj.get_target_data_update(query,db_prop_key_load) end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" data_format = \"JOB", "\" + app_name +\" --> Job \"+app_name+\" Succeed \\n\") except", "A.ORGANIZATION_NAME AS SALES_REP_ORG, A.COMP_PLAN_TYPE_CODE, A.COMP_PLAN_TITLE, A.COMP_PLAN_CATEGORY_CODE, A.COMP_PLAN_DESCRIPTION, NULL AS GOAL_CURR_CODE", "from time import gmtime, strftime import cx_Oracle import py4j import", "'SALES_HIERARCHY' table based on stream lookups. 
# # # Initial", "1\"\"\" return query # Main method def main(): try: src_count", "time import gmtime, strftime import cx_Oracle import py4j import pyspark", "AS SALES_MULTI_AREA, B.AREA_DESCRIPTION AS SALES_AREA, B.MULTI_REGION_DESCRIPTION AS SALES_MULTI_REGION, SUBSTR(B.REGION_DESCRIPTION,1,50) AS", "= config['LOG_DIR_NAME'] + \"/\" + log_filename #SQL Query query =", "SALES_REGION, SUBSTR(B.DISTRICT_DESCRIPTION,1,50) AS SALES_DISTRICT, SUBSTR(B.TEAM_DESCRIPTION,1,50) AS SALES_TEAM, A.EMPLOYEE_ID, A.BK_SALES_REP_NUMBER AS", "start_spark from dependencies.EbiReadWrite import EbiReadWrite import logging import sys from", "from dependencies.spark import start_spark from dependencies.EbiReadWrite import EbiReadWrite import logging", "C.RECENT_HIRE_DATE AS LAST_HIRE_DATE FROM ( SELECT a.*,ROW_NUMBER() over (partition by", "SALES_TEAM, A.EMPLOYEE_ID, A.BK_SALES_REP_NUMBER AS SALES_REP_NUMBER, SUBSTR(A.EMP_SYS_LOGIN_ID,1,10) AS LOGIN_ID, SUBSTR(A.SALES_REP_NAME,1,50) AS", "db_schema +\"\"\".SALES_HIERARCHY (SALES_GEOGRAPHY, SALES_MULTI_AREA, SALES_AREA, SALES_MULTI_REGION, SALES_REGION, SALES_DISTRICT, SALES_TEAM, EMPLOYEE_ID,", "\"SCH1101.SH\" app_name = \"JB_SALES_HIERARCHY_FLAG_N_SR\" log_filename = app_name + '_' +", "sys from time import gmtime, strftime import cx_Oracle import py4j", "FROM DIMS.SALES_PARTICIPANT WHERE CURRENT_RECORD_FLAG = 'Y') AND PARTICIPANT_LEVEL_CODE = 'SR'", "logger.error(\"\\n __main__ \"+ app_name +\" --> Exception-Traceback :: \" +", "DISTINCT BK_SALES_REP_NUMBER FROM DIMS.SALES_PARTICIPANT WHERE CURRENT_RECORD_FLAG = 'Y') AND PARTICIPANT_LEVEL_CODE", "CURRENT_RECORD_FLAG = 'Y') AND PARTICIPANT_LEVEL_CODE = 'SR' ORDER BY BK_SALES_REP_NUMBER,SALES_PARTICIPANT_KEY", "or console end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" data_format = \"JOB START DT", "SALES_AREA, SALES_MULTI_REGION, SALES_REGION, SALES_DISTRICT, SALES_TEAM, EMPLOYEE_ID, SALES_REP_NUMBER, LOGIN_ID, SALES_REP_NAME, SALES_REP_ORG,", "'Y') AND PARTICIPANT_LEVEL_CODE = 'SR' ORDER BY BK_SALES_REP_NUMBER,SALES_PARTICIPANT_KEY ) A", "= EbiReadWrite(app_name,spark,config,logger) # DB prop Key of Source DB db_prop_key_load", "Succeed \\n\") except Exception as err: # Write expeption in", "AS SALES_GEOGRAPHY, B.MULTI_AREA_DESCRIPTION AS SALES_MULTI_AREA, B.AREA_DESCRIPTION AS SALES_AREA, B.MULTI_REGION_DESCRIPTION AS", "\" + str(err)) raise # Entry point for script if", "A.END_DATE, A.STATUS_CODE, A.PARTICIPANT_LEVEL_CODE, SUBSTR(A.SALES_REP_TYPE_CODE,1,5) AS SALES_REP_TYPE_CODE, A.CURRENT_RECORD_FLAG, C.RECENT_HIRE_DATE AS LAST_HIRE_DATE", "Job \"+app_name+\" Succeed \\n\") except Exception as err: # Write", "NOT IN (SELECT DISTINCT BK_SALES_REP_NUMBER FROM DIMS.SALES_PARTICIPANT WHERE CURRENT_RECORD_FLAG =", "FROM DIMS.WORKER_DETAIL WHERE CURRENT_RECORD_IND = 1 ) C ON C.BK_EMPLOYEE_ID", "\"+start_date+\" | SCRIPT NAME : \"+script_name+\" | JOB : \"+app_name+\"", "table def query_data(db_schema): query = \"\"\"INSERT INTO \"\"\"+ db_schema +\"\"\".SALES_HIERARCHY", "START_DATE, END_DATE, STATUS_CODE, PARTICIPANT_LEVEL_CODE, SALES_REP_TYPE_CODE, CURRENT_RECORD_FLAG, LAST_HIRE_DATE) SELECT B.WW_DIRECT_GEO_DESCRIPTION AS", "A.COMP_PLAN_TITLE, A.COMP_PLAN_CATEGORY_CODE, A.COMP_PLAN_DESCRIPTION, NULL AS GOAL_CURR_CODE , A.START_DATE, A.END_DATE, A.STATUS_CODE,", "# Write expeption in spark log or console end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\",", "strftime import cx_Oracle import py4j import pyspark # Spark logging", "----------------- ------------------ # 2018-11-02 Initial creation # 
#************************************************************************************************************** # Importing", "= \"JB_SALES_HIERARCHY_FLAG_N_SR\" log_filename = app_name + '_' + log_date +", "= start_spark( app_name=app_name) # Create class Object Ebi_read_write_obj = EbiReadWrite(app_name,spark,config,logger)", "SALES_REP_TYPE_CODE, CURRENT_RECORD_FLAG, LAST_HIRE_DATE) SELECT B.WW_DIRECT_GEO_DESCRIPTION AS SALES_GEOGRAPHY, B.MULTI_AREA_DESCRIPTION AS SALES_MULTI_AREA,", "db_schema = config['DB_SCHEMA'] log_file = config['LOG_DIR_NAME'] + \"/\" + log_filename", "A.STATUS_CODE, A.PARTICIPANT_LEVEL_CODE, SUBSTR(A.SALES_REP_TYPE_CODE,1,5) AS SALES_REP_TYPE_CODE, A.CURRENT_RECORD_FLAG, C.RECENT_HIRE_DATE AS LAST_HIRE_DATE FROM", "\"\"\"+ db_schema +\"\"\".SALES_HIERARCHY (SALES_GEOGRAPHY, SALES_MULTI_AREA, SALES_AREA, SALES_MULTI_REGION, SALES_REGION, SALES_DISTRICT, SALES_TEAM,", "point for script if __name__ == \"__main__\": # Calling main()", "err: # Write expeption in spark log or console end_date=\"'\"+strftime(\"%Y-%m-%d", "+ \"/\" + log_filename #SQL Query query = query_data(db_schema) #", "str(err)) raise # Entry point for script if __name__ ==", "SUBSTR(B.REGION_DESCRIPTION,1,50) AS SALES_REGION, SUBSTR(B.DISTRICT_DESCRIPTION,1,50) AS SALES_DISTRICT, SUBSTR(B.TEAM_DESCRIPTION,1,50) AS SALES_TEAM, A.EMPLOYEE_ID,", "LAST_HIRE_DATE FROM ( SELECT a.*,ROW_NUMBER() over (partition by BK_SALES_REP_NUMBER ORDER", "NAME : \"+script_name+\" | JOB : \"+app_name+\" | SRC COUNT", "B.TERRITORY_KEY = A.TERRITORY_KEY LEFT OUTER JOIN (SELECT LTRIM(BK_EMPLOYEE_ID,'0') BK_EMPLOYEE_ID,RECENT_HIRE_DATE FROM", "query # Main method def main(): try: src_count = '0'", "table based on stream lookups. # # # Initial Creation:", "| TGT COUNT : \"+dest_count+\" | JOB END DT :", "\"+app_name+\" Succeed \\n\") except Exception as err: # Write expeption", "JB_SALES_HIERARCHY_FLAG_N_SR.py #************************************************************************************************************** # # Created by : bibin # Version", "and config spark, config = start_spark( app_name=app_name) # Create class", "SCH1101.sh --> JB_SALES_HIERARCHY_FLAG_N_SR.py #************************************************************************************************************** # # Created by : bibin", "Importing required Lib from dependencies.spark import start_spark from dependencies.EbiReadWrite import", "gmtime()) # Job Naming Details script_name = \"SCH1101.SH\" app_name =", "DT : \"+end_date+\" | STATUS : %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"[Error] Failed\")", "by : bibin # Version : 1.0 # # Description", "import start_spark from dependencies.EbiReadWrite import EbiReadWrite import logging import sys", "spark log or console end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" data_format = \"JOB", ": 1.0 # # Description : # 1. 
This script", "A.CURRENT_RECORD_FLAG, C.RECENT_HIRE_DATE AS LAST_HIRE_DATE FROM ( SELECT a.*,ROW_NUMBER() over (partition", "DIMS.SALES_TERR_HIERAR_AS_IS_MV B ON B.TERRITORY_KEY = A.TERRITORY_KEY LEFT OUTER JOIN (SELECT", "AS SALES_REP_ORG, A.COMP_PLAN_TYPE_CODE, A.COMP_PLAN_TITLE, A.COMP_PLAN_CATEGORY_CODE, A.COMP_PLAN_DESCRIPTION, NULL AS GOAL_CURR_CODE ,", "SCRIPT NAME : \"+script_name+\" | JOB : \"+app_name+\" | SRC", "# # Created by : bibin # Version : 1.0", "\"+script_name+\" | JOB : \"+app_name+\" | SRC COUNT : \"+src_count+\"", "# start Spark application and get Spark session, logger and", "TGT COUNT : \"+dest_count+\" | JOB END DT : \"+end_date+\"", "= 1 ) C ON C.BK_EMPLOYEE_ID = A.EMPLOYEE_ID WHERE RANK", "app_name +\" --> Exception-Traceback :: \" + str(err)) raise #", "JOIN (SELECT LTRIM(BK_EMPLOYEE_ID,'0') BK_EMPLOYEE_ID,RECENT_HIRE_DATE FROM DIMS.WORKER_DETAIL WHERE CURRENT_RECORD_IND = 1", "#************************************************************************************************************** # Importing required Lib from dependencies.spark import start_spark from", "+ '.log' # Query for loading invoice table def query_data(db_schema):", "log_file = config['LOG_DIR_NAME'] + \"/\" + log_filename #SQL Query query", "dependencies.spark import start_spark from dependencies.EbiReadWrite import EbiReadWrite import logging import", "LTRIM(BK_EMPLOYEE_ID,'0') BK_EMPLOYEE_ID,RECENT_HIRE_DATE FROM DIMS.WORKER_DETAIL WHERE CURRENT_RECORD_IND = 1 ) C", "A.PARTICIPANT_LEVEL_CODE, SUBSTR(A.SALES_REP_TYPE_CODE,1,5) AS SALES_REP_TYPE_CODE, A.CURRENT_RECORD_FLAG, C.RECENT_HIRE_DATE AS LAST_HIRE_DATE FROM (", "FROM ( SELECT a.*,ROW_NUMBER() over (partition by BK_SALES_REP_NUMBER ORDER BY", "JOIN DIMS.SALES_TERR_HIERAR_AS_IS_MV B ON B.TERRITORY_KEY = A.TERRITORY_KEY LEFT OUTER JOIN", "Naming Details script_name = \"SCH1101.SH\" app_name = \"JB_SALES_HIERARCHY_FLAG_N_SR\" log_filename =", "# Job Naming Details script_name = \"SCH1101.SH\" app_name = \"JB_SALES_HIERARCHY_FLAG_N_SR\"", "SALES_REP_NAME, SALES_REP_ORG, COMP_PLAN_TYPE_CODE, COMP_PLAN_TITLE, COMP_PLAN_CATEGORY_CODE, COMP_PLAN_DESCRIPTION, GOAL_CURR_CODE, START_DATE, END_DATE, STATUS_CODE,", "based on stream lookups. # # # Initial Creation: #", "Spark logging logger = logging.getLogger(__name__) # Date Formats start_date =", "invoice table def query_data(db_schema): query = \"\"\"INSERT INTO \"\"\"+ db_schema", "SALES_REP_NUMBER, LOGIN_ID, SALES_REP_NAME, SALES_REP_ORG, COMP_PLAN_TYPE_CODE, COMP_PLAN_TITLE, COMP_PLAN_CATEGORY_CODE, COMP_PLAN_DESCRIPTION, GOAL_CURR_CODE, START_DATE,", "%(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"[Error] Failed\") Ebi_read_write_obj.job_debugger_print(\" \\n Job \"+app_name+\" Failed\\n\") logger.error(\"\\n", "SALES_REP_TYPE_CODE, A.CURRENT_RECORD_FLAG, C.RECENT_HIRE_DATE AS LAST_HIRE_DATE FROM ( SELECT a.*,ROW_NUMBER() over", "AS SALES_TEAM, A.EMPLOYEE_ID, A.BK_SALES_REP_NUMBER AS SALES_REP_NUMBER, SUBSTR(A.EMP_SYS_LOGIN_ID,1,10) AS LOGIN_ID, SUBSTR(A.SALES_REP_NAME,1,50)", "Description : # 1. 
This script will load the data", "AS SALES_REGION, SUBSTR(B.DISTRICT_DESCRIPTION,1,50) AS SALES_DISTRICT, SUBSTR(B.TEAM_DESCRIPTION,1,50) AS SALES_TEAM, A.EMPLOYEE_ID, A.BK_SALES_REP_NUMBER", "as err: # Write expeption in spark log or console", "Exception-Traceback :: \" + str(err)) raise # Entry point for", "AS LOGIN_ID, SUBSTR(A.SALES_REP_NAME,1,50) AS SALES_REP_NAME, A.ORGANIZATION_NAME AS SALES_REP_ORG, A.COMP_PLAN_TYPE_CODE, A.COMP_PLAN_TITLE,", "logger.info(\"Success\") Ebi_read_write_obj.job_debugger_print(\" \\n __main__ \" + app_name +\" --> Job", "Ebi_read_write_obj.job_debugger_print(\" \\n __main__ \" + app_name +\" --> Job \"+app_name+\"", "LEFT OUTER JOIN (SELECT LTRIM(BK_EMPLOYEE_ID,'0') BK_EMPLOYEE_ID,RECENT_HIRE_DATE FROM DIMS.WORKER_DETAIL WHERE CURRENT_RECORD_IND", ": \"+end_date+\" | STATUS : %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"Success\") Ebi_read_write_obj.job_debugger_print(\" \\n", "SELECT B.WW_DIRECT_GEO_DESCRIPTION AS SALES_GEOGRAPHY, B.MULTI_AREA_DESCRIPTION AS SALES_MULTI_AREA, B.AREA_DESCRIPTION AS SALES_AREA,", "expeption in spark log or console end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" data_format", "Query for loading invoice table def query_data(db_schema): query = \"\"\"INSERT", "Write expeption in spark log or console end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\"", ": %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"Success\") Ebi_read_write_obj.job_debugger_print(\" \\n __main__ \" + app_name", "= app_name + '_' + log_date + '.log' # Query", "get_target_data_update() Ebi_read_write_obj.get_target_data_update(query,db_prop_key_load) end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" data_format = \"JOB START DT", "logger.info(\"[Error] Failed\") Ebi_read_write_obj.job_debugger_print(\" \\n Job \"+app_name+\" Failed\\n\") logger.error(\"\\n __main__ \"+", "DB db_prop_key_load = config['DB_PROP_KEY_LOAD'] db_prop_key_extract = config['DB_PROP_KEY_EXTRACT'] db_schema = config['DB_SCHEMA']", "+\" --> Exception-Traceback :: \" + str(err)) raise # Entry", "Job Class method --> get_target_data_update() Ebi_read_write_obj.get_target_data_update(query,db_prop_key_load) end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" data_format", "for script if __name__ == \"__main__\": # Calling main() method", "ON C.BK_EMPLOYEE_ID = A.EMPLOYEE_ID WHERE RANK = 1\"\"\" return query", "SALES_REP_NUMBER, SUBSTR(A.EMP_SYS_LOGIN_ID,1,10) AS LOGIN_ID, SUBSTR(A.SALES_REP_NAME,1,50) AS SALES_REP_NAME, A.ORGANIZATION_NAME AS SALES_REP_ORG,", "prop Key of Source DB db_prop_key_load = config['DB_PROP_KEY_LOAD'] db_prop_key_extract =", "= '0' dest_count = '0' # start Spark application and", "will load the data into 'SALES_HIERARCHY' table based on stream", "config['DB_PROP_KEY_LOAD'] db_prop_key_extract = config['DB_PROP_KEY_EXTRACT'] db_schema = config['DB_SCHEMA'] log_file = config['LOG_DIR_NAME']", "= config['DB_PROP_KEY_EXTRACT'] db_schema = config['DB_SCHEMA'] log_file = config['LOG_DIR_NAME'] + \"/\"", "loading invoice table def query_data(db_schema): query = \"\"\"INSERT INTO \"\"\"+", "LOGIN_ID, SALES_REP_NAME, SALES_REP_ORG, COMP_PLAN_TYPE_CODE, COMP_PLAN_TITLE, COMP_PLAN_CATEGORY_CODE, COMP_PLAN_DESCRIPTION, GOAL_CURR_CODE, START_DATE, END_DATE,", "script if __name__ == \"__main__\": # Calling main() method main()", "= '0' # start Spark application and get Spark session,", "import sys from time import gmtime, strftime import 
cx_Oracle import", "of Source DB db_prop_key_load = config['DB_PROP_KEY_LOAD'] db_prop_key_extract = config['DB_PROP_KEY_EXTRACT'] db_schema", "application and get Spark session, logger and config spark, config", "PARTICIPANT_LEVEL_CODE, SALES_REP_TYPE_CODE, CURRENT_RECORD_FLAG, LAST_HIRE_DATE) SELECT B.WW_DIRECT_GEO_DESCRIPTION AS SALES_GEOGRAPHY, B.MULTI_AREA_DESCRIPTION AS", "pyspark # Spark logging logger = logging.getLogger(__name__) # Date Formats", "Created by : bibin # Version : 1.0 # #", "# Create class Object Ebi_read_write_obj = EbiReadWrite(app_name,spark,config,logger) # DB prop", "logging.getLogger(__name__) # Date Formats start_date = \"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" log_date", ": \"+start_date+\" | SCRIPT NAME : \"+script_name+\" | JOB :", "END DT : \"+end_date+\" | STATUS : %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"[Error]", "query = \"\"\"INSERT INTO \"\"\"+ db_schema +\"\"\".SALES_HIERARCHY (SALES_GEOGRAPHY, SALES_MULTI_AREA, SALES_AREA,", "--> Job \"+app_name+\" Succeed \\n\") except Exception as err: #", "lookups. # # # Initial Creation: # # Date (YYYY-MM-DD)", "'.log' # Query for loading invoice table def query_data(db_schema): query", "import cx_Oracle import py4j import pyspark # Spark logging logger", ": \"+app_name+\" | SRC COUNT : \"+src_count+\" | TGT COUNT", "dest_count = '0' # start Spark application and get Spark", "= \"SCH1101.SH\" app_name = \"JB_SALES_HIERARCHY_FLAG_N_SR\" log_filename = app_name + '_'", "Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"[Error] Failed\") Ebi_read_write_obj.job_debugger_print(\" \\n Job \"+app_name+\" Failed\\n\") logger.error(\"\\n __main__", "Class method --> get_target_data_update() Ebi_read_write_obj.get_target_data_update(query,db_prop_key_load) end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" data_format =", "DIMS.WORKER_DETAIL WHERE CURRENT_RECORD_IND = 1 ) C ON C.BK_EMPLOYEE_ID =", "Job Naming Details script_name = \"SCH1101.SH\" app_name = \"JB_SALES_HIERARCHY_FLAG_N_SR\" log_filename", "config = start_spark( app_name=app_name) # Create class Object Ebi_read_write_obj =", "src_count = '0' dest_count = '0' # start Spark application", "AND PARTICIPANT_LEVEL_CODE = 'SR' ORDER BY BK_SALES_REP_NUMBER,SALES_PARTICIPANT_KEY ) A INNER", "ORDER BY END_DATE desc) as RANK FROM DIMS.SALES_PARTICIPANT a WHERE", "creation # #************************************************************************************************************** # Importing required Lib from dependencies.spark import", "CURRENT_RECORD_FLAG, LAST_HIRE_DATE) SELECT B.WW_DIRECT_GEO_DESCRIPTION AS SALES_GEOGRAPHY, B.MULTI_AREA_DESCRIPTION AS SALES_MULTI_AREA, B.AREA_DESCRIPTION", "'0' # start Spark application and get Spark session, logger", "def query_data(db_schema): query = \"\"\"INSERT INTO \"\"\"+ db_schema +\"\"\".SALES_HIERARCHY (SALES_GEOGRAPHY,", "gmtime())+\"'\" log_date =strftime(\"%Y%m%d\", gmtime()) # Job Naming Details script_name =", "A INNER JOIN DIMS.SALES_TERR_HIERAR_AS_IS_MV B ON B.TERRITORY_KEY = A.TERRITORY_KEY LEFT", "# Date (YYYY-MM-DD) Change Description # ----------------- ------------------ # 2018-11-02", "import gmtime, strftime import cx_Oracle import py4j import pyspark #", "COUNT : \"+src_count+\" | TGT COUNT : \"+dest_count+\" | JOB", "app_name + '_' + log_date + '.log' # Query for", "by BK_SALES_REP_NUMBER ORDER BY END_DATE desc) as RANK FROM DIMS.SALES_PARTICIPANT", "<reponame>bibinvasudev/EBI_Project # SCH1101.sh --> 
JB_SALES_HIERARCHY_FLAG_N_SR.py #************************************************************************************************************** # # Created by", "AS SALES_DISTRICT, SUBSTR(B.TEAM_DESCRIPTION,1,50) AS SALES_TEAM, A.EMPLOYEE_ID, A.BK_SALES_REP_NUMBER AS SALES_REP_NUMBER, SUBSTR(A.EMP_SYS_LOGIN_ID,1,10)", "main(): try: src_count = '0' dest_count = '0' # start", "\\n\") except Exception as err: # Write expeption in spark", "= A.TERRITORY_KEY LEFT OUTER JOIN (SELECT LTRIM(BK_EMPLOYEE_ID,'0') BK_EMPLOYEE_ID,RECENT_HIRE_DATE FROM DIMS.WORKER_DETAIL", "%(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"Success\") Ebi_read_write_obj.job_debugger_print(\" \\n __main__ \" + app_name +\"", "(SALES_GEOGRAPHY, SALES_MULTI_AREA, SALES_AREA, SALES_MULTI_REGION, SALES_REGION, SALES_DISTRICT, SALES_TEAM, EMPLOYEE_ID, SALES_REP_NUMBER, LOGIN_ID,", "LAST_HIRE_DATE) SELECT B.WW_DIRECT_GEO_DESCRIPTION AS SALES_GEOGRAPHY, B.MULTI_AREA_DESCRIPTION AS SALES_MULTI_AREA, B.AREA_DESCRIPTION AS", "| JOB : \"+app_name+\" | SRC COUNT : \"+src_count+\" |", "B.AREA_DESCRIPTION AS SALES_AREA, B.MULTI_REGION_DESCRIPTION AS SALES_MULTI_REGION, SUBSTR(B.REGION_DESCRIPTION,1,50) AS SALES_REGION, SUBSTR(B.DISTRICT_DESCRIPTION,1,50)", "= query_data(db_schema) # Calling Job Class method --> get_target_data_update() Ebi_read_write_obj.get_target_data_update(query,db_prop_key_load)", ": \"+script_name+\" | JOB : \"+app_name+\" | SRC COUNT :", ", A.START_DATE, A.END_DATE, A.STATUS_CODE, A.PARTICIPANT_LEVEL_CODE, SUBSTR(A.SALES_REP_TYPE_CODE,1,5) AS SALES_REP_TYPE_CODE, A.CURRENT_RECORD_FLAG, C.RECENT_HIRE_DATE", "log_date + '.log' # Query for loading invoice table def", "| SCRIPT NAME : \"+script_name+\" | JOB : \"+app_name+\" |", "into 'SALES_HIERARCHY' table based on stream lookups. # # #", "as RANK FROM DIMS.SALES_PARTICIPANT a WHERE BK_SALES_REP_NUMBER NOT IN (SELECT", "=strftime(\"%Y%m%d\", gmtime()) # Job Naming Details script_name = \"SCH1101.SH\" app_name", "SUBSTR(B.TEAM_DESCRIPTION,1,50) AS SALES_TEAM, A.EMPLOYEE_ID, A.BK_SALES_REP_NUMBER AS SALES_REP_NUMBER, SUBSTR(A.EMP_SYS_LOGIN_ID,1,10) AS LOGIN_ID,", "data_format = \"JOB START DT : \"+start_date+\" | SCRIPT NAME", "| STATUS : %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"[Error] Failed\") Ebi_read_write_obj.job_debugger_print(\" \\n Job", "SUBSTR(B.DISTRICT_DESCRIPTION,1,50) AS SALES_DISTRICT, SUBSTR(B.TEAM_DESCRIPTION,1,50) AS SALES_TEAM, A.EMPLOYEE_ID, A.BK_SALES_REP_NUMBER AS SALES_REP_NUMBER,", "END_DATE desc) as RANK FROM DIMS.SALES_PARTICIPANT a WHERE BK_SALES_REP_NUMBER NOT", "# # Date (YYYY-MM-DD) Change Description # ----------------- ------------------ #", "Job \"+app_name+\" Failed\\n\") logger.error(\"\\n __main__ \"+ app_name +\" --> Exception-Traceback", "method def main(): try: src_count = '0' dest_count = '0'", "py4j import pyspark # Spark logging logger = logging.getLogger(__name__) #", "1. 
This script will load the data into 'SALES_HIERARCHY' table", "SELECT a.*,ROW_NUMBER() over (partition by BK_SALES_REP_NUMBER ORDER BY END_DATE desc)", "WHERE BK_SALES_REP_NUMBER NOT IN (SELECT DISTINCT BK_SALES_REP_NUMBER FROM DIMS.SALES_PARTICIPANT WHERE", "BY BK_SALES_REP_NUMBER,SALES_PARTICIPANT_KEY ) A INNER JOIN DIMS.SALES_TERR_HIERAR_AS_IS_MV B ON B.TERRITORY_KEY", "ON B.TERRITORY_KEY = A.TERRITORY_KEY LEFT OUTER JOIN (SELECT LTRIM(BK_EMPLOYEE_ID,'0') BK_EMPLOYEE_ID,RECENT_HIRE_DATE", "# Main method def main(): try: src_count = '0' dest_count", "1.0 # # Description : # 1. This script will", "script will load the data into 'SALES_HIERARCHY' table based on", "WHERE RANK = 1\"\"\" return query # Main method def", "script_name = \"SCH1101.SH\" app_name = \"JB_SALES_HIERARCHY_FLAG_N_SR\" log_filename = app_name +", "= logging.getLogger(__name__) # Date Formats start_date = \"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\"", "query = query_data(db_schema) # Calling Job Class method --> get_target_data_update()", "A.COMP_PLAN_TYPE_CODE, A.COMP_PLAN_TITLE, A.COMP_PLAN_CATEGORY_CODE, A.COMP_PLAN_DESCRIPTION, NULL AS GOAL_CURR_CODE , A.START_DATE, A.END_DATE,", "__main__ \" + app_name +\" --> Job \"+app_name+\" Succeed \\n\")", "Failed\\n\") logger.error(\"\\n __main__ \"+ app_name +\" --> Exception-Traceback :: \"", "| STATUS : %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"Success\") Ebi_read_write_obj.job_debugger_print(\" \\n __main__ \"", "app_name=app_name) # Create class Object Ebi_read_write_obj = EbiReadWrite(app_name,spark,config,logger) # DB", "COMP_PLAN_TITLE, COMP_PLAN_CATEGORY_CODE, COMP_PLAN_DESCRIPTION, GOAL_CURR_CODE, START_DATE, END_DATE, STATUS_CODE, PARTICIPANT_LEVEL_CODE, SALES_REP_TYPE_CODE, CURRENT_RECORD_FLAG,", "RANK = 1\"\"\" return query # Main method def main():", "COMP_PLAN_CATEGORY_CODE, COMP_PLAN_DESCRIPTION, GOAL_CURR_CODE, START_DATE, END_DATE, STATUS_CODE, PARTICIPANT_LEVEL_CODE, SALES_REP_TYPE_CODE, CURRENT_RECORD_FLAG, LAST_HIRE_DATE)", "--> get_target_data_update() Ebi_read_write_obj.get_target_data_update(query,db_prop_key_load) end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" data_format = \"JOB START", "config['LOG_DIR_NAME'] + \"/\" + log_filename #SQL Query query = query_data(db_schema)", "BK_SALES_REP_NUMBER NOT IN (SELECT DISTINCT BK_SALES_REP_NUMBER FROM DIMS.SALES_PARTICIPANT WHERE CURRENT_RECORD_FLAG", "EMPLOYEE_ID, SALES_REP_NUMBER, LOGIN_ID, SALES_REP_NAME, SALES_REP_ORG, COMP_PLAN_TYPE_CODE, COMP_PLAN_TITLE, COMP_PLAN_CATEGORY_CODE, COMP_PLAN_DESCRIPTION, GOAL_CURR_CODE,", "app_name = \"JB_SALES_HIERARCHY_FLAG_N_SR\" log_filename = app_name + '_' + log_date", "SUBSTR(A.SALES_REP_TYPE_CODE,1,5) AS SALES_REP_TYPE_CODE, A.CURRENT_RECORD_FLAG, C.RECENT_HIRE_DATE AS LAST_HIRE_DATE FROM ( SELECT", ": %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"[Error] Failed\") Ebi_read_write_obj.job_debugger_print(\" \\n Job \"+app_name+\" Failed\\n\")", "# Query for loading invoice table def query_data(db_schema): query =", "SUBSTR(A.SALES_REP_NAME,1,50) AS SALES_REP_NAME, A.ORGANIZATION_NAME AS SALES_REP_ORG, A.COMP_PLAN_TYPE_CODE, A.COMP_PLAN_TITLE, A.COMP_PLAN_CATEGORY_CODE, A.COMP_PLAN_DESCRIPTION,", "SALES_MULTI_AREA, SALES_AREA, SALES_MULTI_REGION, SALES_REGION, SALES_DISTRICT, SALES_TEAM, EMPLOYEE_ID, SALES_REP_NUMBER, LOGIN_ID, SALES_REP_NAME,", "# Version : 1.0 # # Description : # 1.", "bibin # Version : 1.0 # # Description : #", "COMP_PLAN_TYPE_CODE, 
COMP_PLAN_TITLE, COMP_PLAN_CATEGORY_CODE, COMP_PLAN_DESCRIPTION, GOAL_CURR_CODE, START_DATE, END_DATE, STATUS_CODE, PARTICIPANT_LEVEL_CODE, SALES_REP_TYPE_CODE,", "query_data(db_schema) # Calling Job Class method --> get_target_data_update() Ebi_read_write_obj.get_target_data_update(query,db_prop_key_load) end_date=\"'\"+strftime(\"%Y-%m-%d", "= \"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" log_date =strftime(\"%Y%m%d\", gmtime()) # Job Naming", "DIMS.SALES_PARTICIPANT a WHERE BK_SALES_REP_NUMBER NOT IN (SELECT DISTINCT BK_SALES_REP_NUMBER FROM", "(SELECT LTRIM(BK_EMPLOYEE_ID,'0') BK_EMPLOYEE_ID,RECENT_HIRE_DATE FROM DIMS.WORKER_DETAIL WHERE CURRENT_RECORD_IND = 1 )", "Lib from dependencies.spark import start_spark from dependencies.EbiReadWrite import EbiReadWrite import", ": # 1. This script will load the data into", "logging logger = logging.getLogger(__name__) # Date Formats start_date = \"'\"+strftime(\"%Y-%m-%d", "C.BK_EMPLOYEE_ID = A.EMPLOYEE_ID WHERE RANK = 1\"\"\" return query #", ": \"+src_count+\" | TGT COUNT : \"+dest_count+\" | JOB END", "def main(): try: src_count = '0' dest_count = '0' #", "BK_SALES_REP_NUMBER ORDER BY END_DATE desc) as RANK FROM DIMS.SALES_PARTICIPANT a", "(SELECT DISTINCT BK_SALES_REP_NUMBER FROM DIMS.SALES_PARTICIPANT WHERE CURRENT_RECORD_FLAG = 'Y') AND", "SALES_TEAM, EMPLOYEE_ID, SALES_REP_NUMBER, LOGIN_ID, SALES_REP_NAME, SALES_REP_ORG, COMP_PLAN_TYPE_CODE, COMP_PLAN_TITLE, COMP_PLAN_CATEGORY_CODE, COMP_PLAN_DESCRIPTION,", "SALES_REP_ORG, COMP_PLAN_TYPE_CODE, COMP_PLAN_TITLE, COMP_PLAN_CATEGORY_CODE, COMP_PLAN_DESCRIPTION, GOAL_CURR_CODE, START_DATE, END_DATE, STATUS_CODE, PARTICIPANT_LEVEL_CODE,", "Description # ----------------- ------------------ # 2018-11-02 Initial creation # #**************************************************************************************************************", "# Calling Job Class method --> get_target_data_update() Ebi_read_write_obj.get_target_data_update(query,db_prop_key_load) end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\",", "config['DB_SCHEMA'] log_file = config['LOG_DIR_NAME'] + \"/\" + log_filename #SQL Query", "FROM DIMS.SALES_PARTICIPANT a WHERE BK_SALES_REP_NUMBER NOT IN (SELECT DISTINCT BK_SALES_REP_NUMBER", ":: \" + str(err)) raise # Entry point for script", "SALES_MULTI_REGION, SUBSTR(B.REGION_DESCRIPTION,1,50) AS SALES_REGION, SUBSTR(B.DISTRICT_DESCRIPTION,1,50) AS SALES_DISTRICT, SUBSTR(B.TEAM_DESCRIPTION,1,50) AS SALES_TEAM,", "on stream lookups. # # # Initial Creation: # #", "import EbiReadWrite import logging import sys from time import gmtime,", "dependencies.EbiReadWrite import EbiReadWrite import logging import sys from time import", "import py4j import pyspark # Spark logging logger = logging.getLogger(__name__)", "= config['DB_PROP_KEY_LOAD'] db_prop_key_extract = config['DB_PROP_KEY_EXTRACT'] db_schema = config['DB_SCHEMA'] log_file =", "'_' + log_date + '.log' # Query for loading invoice", ": \"+end_date+\" | STATUS : %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"[Error] Failed\") Ebi_read_write_obj.job_debugger_print(\"", "# #************************************************************************************************************** # Importing required Lib from dependencies.spark import start_spark", "#SQL Query query = query_data(db_schema) # Calling Job Class method", "for loading invoice table def query_data(db_schema): query = \"\"\"INSERT INTO", "Exception as err: # Write expeption in spark log or", "Version : 1.0 # # Description : # 1. 
This", "Calling Job Class method --> get_target_data_update() Ebi_read_write_obj.get_target_data_update(query,db_prop_key_load) end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\"", "STATUS : %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"Success\") Ebi_read_write_obj.job_debugger_print(\" \\n __main__ \" +", "a WHERE BK_SALES_REP_NUMBER NOT IN (SELECT DISTINCT BK_SALES_REP_NUMBER FROM DIMS.SALES_PARTICIPANT", "query_data(db_schema): query = \"\"\"INSERT INTO \"\"\"+ db_schema +\"\"\".SALES_HIERARCHY (SALES_GEOGRAPHY, SALES_MULTI_AREA,", "# Created by : bibin # Version : 1.0 #", "= 1\"\"\" return query # Main method def main(): try:", "Key of Source DB db_prop_key_load = config['DB_PROP_KEY_LOAD'] db_prop_key_extract = config['DB_PROP_KEY_EXTRACT']", "import pyspark # Spark logging logger = logging.getLogger(__name__) # Date", "PARTICIPANT_LEVEL_CODE = 'SR' ORDER BY BK_SALES_REP_NUMBER,SALES_PARTICIPANT_KEY ) A INNER JOIN", "SALES_REP_NAME, A.ORGANIZATION_NAME AS SALES_REP_ORG, A.COMP_PLAN_TYPE_CODE, A.COMP_PLAN_TITLE, A.COMP_PLAN_CATEGORY_CODE, A.COMP_PLAN_DESCRIPTION, NULL AS", "stream lookups. # # # Initial Creation: # # Date", "Details script_name = \"SCH1101.SH\" app_name = \"JB_SALES_HIERARCHY_FLAG_N_SR\" log_filename = app_name", "Spark session, logger and config spark, config = start_spark( app_name=app_name)", "+ app_name +\" --> Job \"+app_name+\" Succeed \\n\") except Exception", "B.MULTI_REGION_DESCRIPTION AS SALES_MULTI_REGION, SUBSTR(B.REGION_DESCRIPTION,1,50) AS SALES_REGION, SUBSTR(B.DISTRICT_DESCRIPTION,1,50) AS SALES_DISTRICT, SUBSTR(B.TEAM_DESCRIPTION,1,50)", "STATUS_CODE, PARTICIPANT_LEVEL_CODE, SALES_REP_TYPE_CODE, CURRENT_RECORD_FLAG, LAST_HIRE_DATE) SELECT B.WW_DIRECT_GEO_DESCRIPTION AS SALES_GEOGRAPHY, B.MULTI_AREA_DESCRIPTION", "app_name +\" --> Job \"+app_name+\" Succeed \\n\") except Exception as", "( SELECT a.*,ROW_NUMBER() over (partition by BK_SALES_REP_NUMBER ORDER BY END_DATE", "cx_Oracle import py4j import pyspark # Spark logging logger =", "SALES_MULTI_REGION, SALES_REGION, SALES_DISTRICT, SALES_TEAM, EMPLOYEE_ID, SALES_REP_NUMBER, LOGIN_ID, SALES_REP_NAME, SALES_REP_ORG, COMP_PLAN_TYPE_CODE,", "LOGIN_ID, SUBSTR(A.SALES_REP_NAME,1,50) AS SALES_REP_NAME, A.ORGANIZATION_NAME AS SALES_REP_ORG, A.COMP_PLAN_TYPE_CODE, A.COMP_PLAN_TITLE, A.COMP_PLAN_CATEGORY_CODE,", "Source DB db_prop_key_load = config['DB_PROP_KEY_LOAD'] db_prop_key_extract = config['DB_PROP_KEY_EXTRACT'] db_schema =", "desc) as RANK FROM DIMS.SALES_PARTICIPANT a WHERE BK_SALES_REP_NUMBER NOT IN", "NULL AS GOAL_CURR_CODE , A.START_DATE, A.END_DATE, A.STATUS_CODE, A.PARTICIPANT_LEVEL_CODE, SUBSTR(A.SALES_REP_TYPE_CODE,1,5) AS", "# 1. 
This script will load the data into 'SALES_HIERARCHY'", "end_date=\"'\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())+\"'\" data_format = \"JOB START DT : \"+start_date+\"", "JOB END DT : \"+end_date+\" | STATUS : %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger)", "try: src_count = '0' dest_count = '0' # start Spark", "\"+end_date+\" | STATUS : %(message)s\" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info(\"Success\") Ebi_read_write_obj.job_debugger_print(\" \\n __main__", "BK_SALES_REP_NUMBER,SALES_PARTICIPANT_KEY ) A INNER JOIN DIMS.SALES_TERR_HIERAR_AS_IS_MV B ON B.TERRITORY_KEY =", "__main__ \"+ app_name +\" --> Exception-Traceback :: \" + str(err))", "\"\"\"INSERT INTO \"\"\"+ db_schema +\"\"\".SALES_HIERARCHY (SALES_GEOGRAPHY, SALES_MULTI_AREA, SALES_AREA, SALES_MULTI_REGION, SALES_REGION,", "from dependencies.EbiReadWrite import EbiReadWrite import logging import sys from time", "SALES_AREA, B.MULTI_REGION_DESCRIPTION AS SALES_MULTI_REGION, SUBSTR(B.REGION_DESCRIPTION,1,50) AS SALES_REGION, SUBSTR(B.DISTRICT_DESCRIPTION,1,50) AS SALES_DISTRICT,", "DT : \"+start_date+\" | SCRIPT NAME : \"+script_name+\" | JOB", "+ '_' + log_date + '.log' # Query for loading" ]
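The INSERT...SELECT above keeps only the most recent SALES_PARTICIPANT row per sales rep by ranking rows with ROW_NUMBER() OVER (PARTITION BY BK_SALES_REP_NUMBER ORDER BY END_DATE DESC) and filtering on RANK = 1. The snippet below is a minimal, standalone PySpark sketch of that dedup pattern, not part of the job itself; the sample rows and the "rank_dedup_sketch" app name are made up for illustration.

# Standalone sketch of the "latest row per key" pattern used in the query above.
from pyspark.sql import SparkSession, Window, functions as F

spark = SparkSession.builder.appName("rank_dedup_sketch").getOrCreate()
rows = [("REP1", "2018-01-31", "N"),
        ("REP1", "2018-06-30", "N"),
        ("REP2", "2018-03-31", "N")]
df = spark.createDataFrame(rows, ["BK_SALES_REP_NUMBER", "END_DATE", "CURRENT_RECORD_FLAG"])

w = Window.partitionBy("BK_SALES_REP_NUMBER").orderBy(F.col("END_DATE").desc())
latest = df.withColumn("RANK", F.row_number().over(w)).where(F.col("RANK") == 1)
latest.show()   # one row per rep: the one with the latest END_DATE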
[ "def encodeTime(t): ret = '' for i in t[:-3]: si", "= int(i / 4294967296) low = i - high return", "i in t[:-3]: si = str(i) if len(si) < 2:", "# MythTV's standard representation used on filenames def encodeTime(t): ret", "if low < 0: low += 4294967296 if high <", "< 2: ret += si.zfill(2) else: ret += si return", "encodeLongLong(i): high = int(i / 4294967296) low = i -", "i - high return high, low def parseOk(str): if str", "converts it to # MythTV's standard representation used on filenames", "si = str(i) if len(si) < 2: ret += si.zfill(2)", "<< 32 low = int(lst[1]) if low < 0: low", "pass # t is a nine item tuple returned by", "repr(lst[i]) pass # t is a nine item tuple returned", "low = i - high return high, low def parseOk(str):", "ret = '' for i in t[:-3]: si = str(i)", "printList(lst): #for i in range(len(lst)): # print i, '\\t', repr(lst[i])", "low def encodeLongLong(i): high = int(i / 4294967296) low =", "return high + low def encodeLongLong(i): high = int(i /", "int(lst[1]) if low < 0: low += 4294967296 if high", "low = int(lst[1]) if low < 0: low += 4294967296", "4294967296 return high + low def encodeLongLong(i): high = int(i", "else: return False def printList(lst): #for i in range(len(lst)): #", "for i in t[:-3]: si = str(i) if len(si) <", "used on filenames def encodeTime(t): ret = '' for i", "+ low def encodeLongLong(i): high = int(i / 4294967296) low", "32 low = int(lst[1]) if low < 0: low +=", "i in range(len(lst)): # print i, '\\t', repr(lst[i]) pass #", "'' for i in t[:-3]: si = str(i) if len(si)", "high, low def parseOk(str): if str == 'ok': return True", "str == 'ok': return True else: return False def printList(lst):", "filenames def encodeTime(t): ret = '' for i in t[:-3]:", "# print i, '\\t', repr(lst[i]) pass # t is a", "0: high += 4294967296 return high + low def encodeLongLong(i):", "in range(len(lst)): # print i, '\\t', repr(lst[i]) pass # t", "= int(lst[1]) if low < 0: low += 4294967296 if", "#for i in range(len(lst)): # print i, '\\t', repr(lst[i]) pass", "def parseOk(str): if str == 'ok': return True else: return", "t[:-3]: si = str(i) if len(si) < 2: ret +=", "+= 4294967296 return high + low def encodeLongLong(i): high =", "def printList(lst): #for i in range(len(lst)): # print i, '\\t',", "4294967296) low = i - high return high, low def", "len(si) < 2: ret += si.zfill(2) else: ret += si", "- high return high, low def parseOk(str): if str ==", "return False def printList(lst): #for i in range(len(lst)): # print", "in t[:-3]: si = str(i) if len(si) < 2: ret", "is a nine item tuple returned by the time module.", "print i, '\\t', repr(lst[i]) pass # t is a nine", "/ 4294967296) low = i - high return high, low", "= int(lst[0]) << 32 low = int(lst[1]) if low <", "high = int(i / 4294967296) low = i - high", "i, '\\t', repr(lst[i]) pass # t is a nine item", "2: ret += si.zfill(2) else: ret += si return ret", "MythTV's standard representation used on filenames def encodeTime(t): ret =", "'ok': return True else: return False def printList(lst): #for i", "= i - high return high, low def parseOk(str): if", "str(i) if len(si) < 2: ret += si.zfill(2) else: ret", "'\\t', repr(lst[i]) pass # t is a nine item tuple", "on filenames def encodeTime(t): ret = '' for i in", "high return high, low def parseOk(str): if str == 'ok':", "representation used on filenames def encodeTime(t): ret = '' for", "0: low += 4294967296 if high < 0: high +=", "= str(i) if len(si) < 2: ret += si.zfill(2) else:", "encodeTime(t): ret = '' for i in t[:-3]: si =", "This method converts it to # 
MythTV's standard representation used", "returned by the time module. This method converts it to", "time module. This method converts it to # MythTV's standard", "the time module. This method converts it to # MythTV's", "if len(si) < 2: ret += si.zfill(2) else: ret +=", "if high < 0: high += 4294967296 return high +", "decodeLongLong(lst): high = int(lst[0]) << 32 low = int(lst[1]) if", "return high, low def parseOk(str): if str == 'ok': return", "low def parseOk(str): if str == 'ok': return True else:", "True else: return False def printList(lst): #for i in range(len(lst)):", "range(len(lst)): # print i, '\\t', repr(lst[i]) pass # t is", "method converts it to # MythTV's standard representation used on", "high < 0: high += 4294967296 return high + low", "low += 4294967296 if high < 0: high += 4294967296", "= '' for i in t[:-3]: si = str(i) if", "4294967296 if high < 0: high += 4294967296 return high", "high += 4294967296 return high + low def encodeLongLong(i): high", "nine item tuple returned by the time module. This method", "return True else: return False def printList(lst): #for i in", "< 0: high += 4294967296 return high + low def", "int(i / 4294967296) low = i - high return high,", "def encodeLongLong(i): high = int(i / 4294967296) low = i", "parseOk(str): if str == 'ok': return True else: return False", "module. This method converts it to # MythTV's standard representation", "if str == 'ok': return True else: return False def", "to # MythTV's standard representation used on filenames def encodeTime(t):", "== 'ok': return True else: return False def printList(lst): #for", "low < 0: low += 4294967296 if high < 0:", "tuple returned by the time module. This method converts it", "it to # MythTV's standard representation used on filenames def", "standard representation used on filenames def encodeTime(t): ret = ''", "by the time module. This method converts it to #", "< 0: low += 4294967296 if high < 0: high", "a nine item tuple returned by the time module. This", "+= 4294967296 if high < 0: high += 4294967296 return", "high = int(lst[0]) << 32 low = int(lst[1]) if low", "high + low def encodeLongLong(i): high = int(i / 4294967296)", "# t is a nine item tuple returned by the", "item tuple returned by the time module. This method converts", "def decodeLongLong(lst): high = int(lst[0]) << 32 low = int(lst[1])", "int(lst[0]) << 32 low = int(lst[1]) if low < 0:", "t is a nine item tuple returned by the time", "False def printList(lst): #for i in range(len(lst)): # print i," ]
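encodeLongLong and decodeLongLong are meant to be inverses: one splits a value into 32-bit halves, the other reassembles them. A minimal round-trip sketch, assuming the two helpers above are in scope; the sample value is arbitrary.

# Round-trip check for the long-long helpers above.
value = 6442450944                     # needs more than 32 bits
high, low = encodeLongLong(value)      # -> (1, 2147483648)
assert decodeLongLong([high, low]) == value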
[ "finished!\") # Unfrag the file subprocess.run([\"ffmpeg\", '-i', temp_path, '-c:v', 'copy',", "all visual methods need detection images vid=cv2.VideoCapture(media_file) ok=True frame =", "new track objects based on the result # from the", "\" f\"something other than 'No'!\") continue media_shape = (media.height, media.width)", "TODO: Handle is_cut? def join_up_final(detections, track_ids): tracklets = defaultdict(list) num_tracklets", "'iou-global-motion'] # Weight methods that require the video visual_methods =", "{media.id}, name {media.name} due to \" f\"'Tracklet Generator Processed' attribute", "name in names[1:-1]: module = getattr(module,name) classify_function = getattr(module,names[-1]) print(\"Strategy:", "= argparse.ArgumentParser(description=__doc__) tator.get_parser(parser) parser.add_argument(\"--detection-type-id\", type=int, required=True) parser.add_argument(\"--tracklet-type-id\", type=int, required=True) parser.add_argument(\"--version-id\",", "numpy as np from openem.tracking import * import json import", "track_ids): tracklets[tid].append(d) return tracklets def extend_tracklets(tracklets, length): for track_id,track in", "x:x['frame']) def restore_det(det): det['x'] = det.get('orig_x',det['x']) det['y'] = det.get('orig_y',det['y']) det['width']", "media {media_file}\", flush=True) continue print(f\"Processing {len(localizations)} detections\", flush=True) # Group", "detections, track_ids = trim_tracklets(detections, track_ids, trim_to) _,det_counts_per_track=np.unique(track_ids,return_counts=True) print(f\"frame-diff {x}: {len(detections)}", "trim_tracklets(detections, track_ids, trim_to) _,det_counts_per_track=np.unique(track_ids,return_counts=True) print(f\"frame-diff {x}: {len(detections)} to {len(det_counts_per_track)}\", flush=True)", "print(f\"Trimming track to max length of {trim_to}\") detections, track_ids =", "'iou-global-motion'] api = tator.get_api(args.host, args.token) detection_type = api.get_localization_type(args.detection_type_id) project =", "weights_strategy = HybridWeights(comparator, None, None, media_shape, fps, 0.0, batch_size) elif", "localizations for x in strategy['frame-diffs']: print(f\"Started {x}\", flush=True) detections, track_ids,", "pairs, weights, is_cut, constraints = join_tracklets( detections, track_ids, x, weights_strategy)", "= renumber_track_ids(track_ids) return detections, track_ids if __name__==\"__main__\": parser = argparse.ArgumentParser(description=__doc__)", "= strategy['args'].get('batch_size', 4) comparator=FeaturesComparator(model_file) #extractor=FeaturesExtractor(args.model_file) class_method = strategy.get('class-method',None) classify_function =", "= (media.height, media.width) fps = media.fps localizations_by_frame = {} localizations", "track.sort(key=lambda x:x['frame']) if classify_function: valid,attrs = classify_function(media.to_dict(), track, **classify_args) elif", "Now we make new track objects based on the result", "track: track_ids.append(track_id) detections.append(d) return detections,track_ids def trim_tracklets(detections, track_ids, max_length): tracklets", "max length of {trim_to}\") detections, track_ids = trim_tracklets(detections, track_ids, trim_to)", "renumber_track_ids(track_ids) if strategy['method'] == 'hybrid': weights_strategy = HybridWeights(comparator, None, None,", "localizations present in media {media_file}\", flush=True) continue print(f\"Processing {len(localizations)} detections\",", "track#,...] 
# [ 133, 33, 13, 133,] # [ 0,0,1,1]", "track[0]['width'] = min(max(abs(old_x-track[0]['x'])+avg_w,0),1) track[0]['height'] = min(max(abs(old_x-track[0]['y'])+avg_h,0),1) else: track[0]['width'] = 0", "openem.tracking import * import json import sys import datetime import", "in names[1:-1]: module = getattr(module,name) classify_function = getattr(module,names[-1]) print(\"Strategy: \",", "detection, ...] # [ track#, track#, track#,...] # [ 133,", "> 0 and min_y > 0: track[0]['x'] = min(max(0,min_x),1) track[0]['y']", "Handle is_cut? def join_up_final(detections, track_ids): tracklets = defaultdict(list) num_tracklets =", "on the result # from the graph solver # [", "restore_det(det): det['x'] = det.get('orig_x',det['x']) det['y'] = det.get('orig_y',det['y']) det['width'] = det.get('orig_w',det['width'])", "= {**default_strategy} with open(args.strategy_config, \"r\") as strategy_file: strategy.update(yaml.load(strategy_file)) else: strategy", "0: track[-1]['x'] = min_x track[-1]['y'] = min_y track[-1]['width'] = min(max(0,abs(new_x-track[-1]['x'])+avg_w),1)", "track[0]['x'] = min(max(0,min_x),1) track[0]['y'] = min(max(0,min_y),1) track[0]['width'] = min(max(abs(old_x-track[0]['x'])+avg_w,0),1) track[0]['height']", "import sys import datetime import tator from pprint import pprint", "+= 1 detections, track_ids = split_tracklets(new_tracklets) track_ids = renumber_track_ids(track_ids) return", "flush=True) pprint(strategy) print(args.media_files, flush=True) optional_fetch_args = {} if args.input_version_id: optional_fetch_args['version']", "> 0: track[0]['x'] = min(max(0,min_x),1) track[0]['y'] = min(max(0,min_y),1) track[0]['width'] =", "in localizations_by_frame[frame]: l['bgr'] = crop_localization(frame_bgr, l) if l['attributes']['Confidence'] < 0.50:", "return img_crop def join_up_iteration(detections, track_ids): tracklets = defaultdict(list) num_tracklets =", "by linear motion, {ext_frames}\") tracklets = join_up_iteration(detections,track_ids) tracklets = extend_tracklets(tracklets,", ">= strategy['min-length']: valid = True attrs = {} else: valid", "import math import subprocess import sys def crop_localization(frame_bgr, localization): img_width", "[ track#, track#, track#,...] # [ 133, 33, 13, 133,]", "def split_tracklets(tracklets): track_ids=[] detections=[] for track_id,track in tracklets.items(): for d", "process.\", flush=True) function_name = class_method.get('function',None) classify_args = class_method.get('args',None) names =", "def join_up_final(detections, track_ids): tracklets = defaultdict(list) num_tracklets = np.max(track_ids) +", "classify_function(media.to_dict(), track, **classify_args) elif len(track) >= strategy['min-length']: valid = True", "frame_detections: detections.append(det) track_ids.append(track_id) track_id += 1 print(\"Loaded all detections\", flush=True)", "detections, track_ids = split_tracklets(new_tracklets) track_ids = renumber_track_ids(track_ids) return detections, track_ids", "valid = False attrs = {} if valid: obj={\"type\": args.tracklet_type_id,", "# [ detection, detection, detection, ...] # [ track#, track#,", "{} if valid: obj={\"type\": args.tracklet_type_id, \"media_ids\": [int(media_id)], \"localization_ids\": [x['id'] for", "exist, download it. 
if strategy['method'] == 'iou-global-motion': if not os.path.exists(media_file):", "in new_objs if x is not None] print(f\"New objects =", "in enumerate(localizations): frame = local['frame'] if frame in localizations_by_frame: localizations_by_frame[frame].append(local)", "= local['frame'] if frame in localizations_by_frame: localizations_by_frame[frame].append(local) else: localizations_by_frame[frame] =", "= comps[1] avg_h = sum_h / len(track) avg_w = sum_w", "frame_bgr[box_y:box_y+box_height,box_x:box_x+box_width,:] return img_crop def join_up_iteration(detections, track_ids): tracklets = defaultdict(list) num_tracklets", "True attrs = {} else: valid = False attrs =", "max_length): tracklets = join_up_iteration(detections, track_ids) next_track_id = 1 new_tracklets =", "# Not all visual methods need detection images vid=cv2.VideoCapture(media_file) ok=True", "Weight methods that require the video visual_methods = ['hybrid', 'iou-global-motion']", "detection_type = api.get_localization_type(args.detection_type_id) project = detection_type.project version_id = args.version_id default_strategy", "detection_type.project version_id = args.version_id default_strategy = {\"method\": \"hybrid\", \"frame-diffs\": [1,2,4,8,16,32,64,128,256],", "based on grouped localizations for x in strategy['frame-diffs']: print(f\"Started {x}\",", "for x in strategy['frame-diffs']: print(f\"Started {x}\", flush=True) detections, track_ids, pairs,", "= False attrs = {} if valid: obj={\"type\": args.tracklet_type_id, \"media_ids\":", "required=True) parser.add_argument(\"--version-id\", type=int) parser.add_argument(\"--input-version-id\", type=int) parser.add_argument(\"--strategy-config\", type=str) parser.add_argument(\"--dry-run\", action='store_true') parser.add_argument('media_files',", "frame in localizations_by_frame: localizations_by_frame[frame].append(local) else: localizations_by_frame[frame] = [local] detections=[] track_ids=[]", "weights, is_cut, constraints = join_tracklets( detections, track_ids, x, weights_strategy) if", "= ['hybrid', 'iou-global-motion'] api = tator.get_api(args.host, args.token) detection_type = api.get_localization_type(args.detection_type_id)", "strategy = {**default_strategy} with open(args.strategy_config, \"r\") as strategy_file: strategy.update(yaml.load(strategy_file)) else:", "min(track[0]['x'],old_x) min_y = min(track[0]['y'],old_y) if min_x > 0 and min_y", "d in track: track_ids.append(track_id) detections.append(d) return detections,track_ids def trim_tracklets(detections, track_ids,", "= min(max(abs(old_x-track[0]['y'])+avg_h,0),1) else: track[0]['width'] = 0 track[0]['height'] = 0 return", "{'method' : None}, \"max-length\": {}, \"min-length\": 0} if args.strategy_config: strategy", "x, weights_strategy) if x in strategy['max-length']: trim_to = strategy['max-length'][x] print(f\"Trimming", "'hybrid': weights_strategy = HybridWeights(comparator, None, None, media_shape, fps, 0.0, batch_size)", "= strategy['max-length'][x] print(f\"Trimming track to max length of {trim_to}\") detections,", "processing the video for frame,frame_detections in localizations_by_frame.items(): for det in", "track.sort(key=lambda x:x['frame']) def restore_det(det): det['x'] = det.get('orig_x',det['x']) det['y'] = det.get('orig_y',det['y'])", "== 'hybrid': # Not all visual methods need detection images", "result # from the graph solver # [ detection, detection,", "== 0: print(f\"No localizations present in media {media_file}\", flush=True) continue", "frame = local['frame'] if frame in 
localizations_by_frame: localizations_by_frame[frame].append(local) else: localizations_by_frame[frame]", "parser.add_argument('media_files', type=str, nargs='*') args = parser.parse_args() # Weight methods methods", "new_tracklets = {} for track_id,detections in tracklets.items(): new_track_count=math.ceil(len(detections)/max_length) for i", "pprint from collections import defaultdict import yaml import math import", "pip_package: p = subprocess.run([sys.executable, \"-m\", \"pip\", \"install\", pip_package]) print(\"Finished process.\",", "[ 133, 33, 13, 133,] # [ 0,0,1,1] # TODO:", "names = function_name.split('.') module = __import__(names[0]) for name in names[1:-1]:", "in tracklets.items(): new_track_count=math.ceil(len(detections)/max_length) for i in range(new_track_count): start=max_length*i end=max_length+(max_length*i) new_tracklets[next_track_id]", "parser = argparse.ArgumentParser(description=__doc__) tator.get_parser(parser) parser.add_argument(\"--detection-type-id\", type=int, required=True) parser.add_argument(\"--tracklet-type-id\", type=int, required=True)", "visual methods need detection images vid=cv2.VideoCapture(media_file) ok=True frame = 0", "weights_strategy) if x in strategy['max-length']: trim_to = strategy['max-length'][x] print(f\"Trimming track", "x in strategy['max-length']: trim_to = strategy['max-length'][x] print(f\"Trimming track to max", "{} for track_id,detections in tracklets.items(): new_track_count=math.ceil(len(detections)/max_length) for i in range(new_track_count):", "[args.input_version_id] for media_file in args.media_files: comps=os.path.splitext(os.path.basename(media_file))[0] media_id=comps.split('_')[0] media = api.get_media(media_id)", "new_tracklets[next_track_id] = detections[start:end] next_track_id += 1 detections, track_ids = split_tracklets(new_tracklets)", "name {media.name} due to \" f\"'Tracklet Generator Processed' attribute being", "we make new track objects based on the result #", "sum_h=0.0 sum_w=0.0 track.sort(key=lambda x:x['frame']) def restore_det(det): det['x'] = det.get('orig_x',det['x']) det['y']", "= min(1,max(0,track[-1]['x']+(vel_x*ext_length))) new_y = min(1,max(0,track[-1]['y']+(vel_y*ext_length))) old_x = min(1,max(0,track[0]['x']-(vel_x*ext_length))) old_y =", "method is analytical on the detections coordinates # and does", "in track: track_ids.append(track_id) detections.append(d) return detections,track_ids def trim_tracklets(detections, track_ids, max_length):", "that require the video visual_methods = ['hybrid', 'iou-global-motion'] api =", "assert(len(detections) == len(track_ids)) for d,tid in zip(detections, track_ids): tracklets[tid].append(d) return", "def extend_tracklets(tracklets, length): for track_id,track in tracklets.items(): if len(track) <=", "the video visual_methods = ['hybrid', 'iou-global-motion'] api = tator.get_api(args.host, args.token)", "if frame in localizations_by_frame: localizations_by_frame[frame].append(local) else: localizations_by_frame[frame] = [local] detections=[]", "det['x'] = det.get('orig_x',det['x']) det['y'] = det.get('orig_y',det['y']) det['width'] = det.get('orig_w',det['width']) det['height']", "= None classify_args = {} if class_method: pip_package=class_method.get('pip',None) if pip_package:", "x:x['frame']) if classify_function: valid,attrs = classify_function(media.to_dict(), track, **classify_args) elif len(track)", "if strategy['method'] == 'iou-global-motion': if not os.path.exists(media_file): temp_path = f'/tmp/{os.path.basename(media_file)}'", "min_y > 0: 
track[0]['x'] = min(max(0,min_x),1) track[0]['y'] = min(max(0,min_y),1) track[0]['width']", "module = __import__(names[0]) for name in names[1:-1]: module = getattr(module,name)", "media ID {media.id}, name {media.name} due to \" f\"'Tracklet Generator", "detections coordinates # and does not require processing the video", "= frame_bgr[box_y:box_y+box_height,box_x:box_x+box_width,:] return img_crop def join_up_iteration(detections, track_ids): tracklets = defaultdict(list)", "img_width) box_height = round(localization['height'] * img_height) img_crop = frame_bgr[box_y:box_y+box_height,box_x:box_x+box_width,:] return", "methods = ['hybrid', 'iou', 'iou-motion', 'iou-global-motion'] # Weight methods that", "localizations] if len(localizations) == 0: print(f\"No localizations present in media", "else: track[-1]['width'] = 0 track[-1]['height'] = 0 min_x = min(track[0]['x'],old_x)", "objects = {len(new_objs)}\") with open(f\"/work/{media_id}.json\", \"w\") as f: json.dump(new_objs,f) if", "tator.util.chunked_create(api.create_state_list,project, state_spec=new_objs): pass try: api.update_media(int(media_id), {\"attributes\":{\"Tracklet Generator Processed\": str(datetime.datetime.now())}}) except:", "detections,track_ids def trim_tracklets(detections, track_ids, max_length): tracklets = join_up_iteration(detections, track_ids) next_track_id", "Unfrag the file subprocess.run([\"ffmpeg\", '-i', temp_path, '-c:v', 'copy', media_file]) os.remove(temp_path)", "local in enumerate(localizations): frame = local['frame'] if frame in localizations_by_frame:", "det['orig_w'] = det['width'] det['orig_h'] = det['height'] restore_det(track[0]) restore_det(track[-1]) for d", "os.path.exists(media_file): temp_path = f'/tmp/{os.path.basename(media_file)}' for progress in tator.util.download_media(api, media, temp_path):", "import json import sys import datetime import tator from pprint", "* img_height) box_width = round(localization['width'] * img_width) box_height = round(localization['height']", "Weight methods methods = ['hybrid', 'iou', 'iou-motion', 'iou-global-motion'] # Weight", "class_method: pip_package=class_method.get('pip',None) if pip_package: p = subprocess.run([sys.executable, \"-m\", \"pip\", \"install\",", "= class_method.get('args',None) names = function_name.split('.') module = __import__(names[0]) for name", "track[-1]['width'] = 0 track[-1]['height'] = 0 min_x = min(track[0]['x'],old_x) min_y", "join_up_final(detections, track_ids): tracklets = defaultdict(list) num_tracklets = np.max(track_ids) + 1", "min_x track[-1]['y'] = min_y track[-1]['width'] = min(max(0,abs(new_x-track[-1]['x'])+avg_w),1) track[-1]['height'] = min(max(0,abs(new_x-track[-1]['y'])+avg_h),1)", "x is not None] print(f\"New objects = {len(new_objs)}\") with open(f\"/work/{media_id}.json\",", "all detections\", flush=True) track_ids = renumber_track_ids(track_ids) if strategy['method'] == 'hybrid':", "strategy['method'] == 'iou-motion': weights_strategy = IoUMotionWeights(media_shape, **strategy['args']) elif strategy['method'] ==", "\"min-length\": 0} if args.strategy_config: strategy = {**default_strategy} with open(args.strategy_config, \"r\")", "0: print(f\"No localizations present in media {media_file}\", flush=True) continue print(f\"Processing", "for l in localizations_by_frame[frame]: l['bgr'] = crop_localization(frame_bgr, l) if l['attributes']['Confidence']", "parser.add_argument(\"--tracklet-type-id\", type=int, required=True) parser.add_argument(\"--version-id\", type=int) 
parser.add_argument(\"--input-version-id\", type=int) parser.add_argument(\"--strategy-config\", type=str) parser.add_argument(\"--dry-run\",", "in localizations_by_frame.items(): for det in frame_detections: detections.append(det) track_ids.append(track_id) track_id +=", "tracklets = extend_tracklets(tracklets, ext_frames) detections, track_ids = split_tracklets(tracklets) # Now", "# If media does not exist, download it. if strategy['method']", "in range(new_track_count): start=max_length*i end=max_length+(max_length*i) new_tracklets[next_track_id] = detections[start:end] next_track_id += 1", "detection images vid=cv2.VideoCapture(media_file) ok=True frame = 0 while ok: ok,frame_bgr", "args.tracklet_type_id, \"media_ids\": [int(media_id)], \"localization_ids\": [x['id'] for x in track], **attrs,", "join_up_final(detections, track_ids) new_objs=[make_object(tracklet) for tracklet in tracklets.values()] new_objs=[x for x", "for tracklet in tracklets.values()] new_objs=[x for x in new_objs if", "print(f\"Skipping media ID {media.id}, name {media.name} due to \" f\"'Tracklet", "ok=True frame = 0 while ok: ok,frame_bgr = vid.read() if", "= 0 min_x = min(track[0]['x'],old_x) min_y = min(track[0]['y'],old_y) if min_x", "len(track_ids)) for d,tid in zip(detections, track_ids): tracklets[tid].append(d) return tracklets def", "sum_w += d['width'] angle,vel,comps = track_vel(track) vel_x = comps[0] vel_y", "0.50: continue detections.append(l) track_ids.append(track_id) track_id += 1 frame+=1 else: #", "elif len(track) >= strategy['min-length']: valid = True attrs = {}", "argparse.ArgumentParser(description=__doc__) tator.get_parser(parser) parser.add_argument(\"--detection-type-id\", type=int, required=True) parser.add_argument(\"--tracklet-type-id\", type=int, required=True) parser.add_argument(\"--version-id\", type=int)", "i in range(new_track_count): start=max_length*i end=max_length+(max_length*i) new_tracklets[next_track_id] = detections[start:end] next_track_id +=", "nargs='*') args = parser.parse_args() # Weight methods methods = ['hybrid',", "subprocess import sys def crop_localization(frame_bgr, localization): img_width = frame_bgr.shape[1] img_height", "new_x = min(1,max(0,track[-1]['x']+(vel_x*ext_length))) new_y = min(1,max(0,track[-1]['y']+(vel_y*ext_length))) old_x = min(1,max(0,track[0]['x']-(vel_x*ext_length))) old_y", "det in frame_detections: detections.append(det) track_ids.append(track_id) track_id += 1 print(\"Loaded all", "str(datetime.datetime.now())}}) except: print(\"WARNING: Unable to set 'Tracklet Generator Processed' attribute\")", "argparse import openem import os import cv2 import numpy as", "strategy['frame-diffs']: print(f\"Started {x}\", flush=True) detections, track_ids, pairs, weights, is_cut, constraints", "\"r\") as strategy_file: strategy.update(yaml.load(strategy_file)) else: strategy = default_strategy if strategy['method']", "box_x = round(localization['x'] * img_width) box_y = round(localization['y'] * img_height)", "version_id = args.version_id default_strategy = {\"method\": \"hybrid\", \"frame-diffs\": [1,2,4,8,16,32,64,128,256], \"args\":", "import yaml import math import subprocess import sys def crop_localization(frame_bgr,", "classify_function = None classify_args = {} if class_method: pip_package=class_method.get('pip',None) if", "strategy_file: strategy.update(yaml.load(strategy_file)) else: strategy = default_strategy if strategy['method'] == 'hybrid':", "print(\"Download finished!\") # Unfrag the file subprocess.run([\"ffmpeg\", '-i', temp_path, 
'-c:v',", "'iou-global-motion': weights_strategy = IoUGlobalMotionWeights(media_shape, media_file, **strategy['args']) # Generate localization bgr", "[int(media_id)], \"localization_ids\": [x['id'] for x in track], **attrs, \"version\": version_id}", "# Generate localization bgr based on grouped localizations for x", "fps = media.fps localizations_by_frame = {} localizations = api.get_localization_list(project, type=args.detection_type_id,", "= detection_type.project version_id = args.version_id default_strategy = {\"method\": \"hybrid\", \"frame-diffs\":", "1 and strategy['extension']['method'] == 'linear-motion': ext_frames=x print(f\"Extending by linear motion,", "args.input_version_id: optional_fetch_args['version'] = [args.input_version_id] for media_file in args.media_files: comps=os.path.splitext(os.path.basename(media_file))[0] media_id=comps.split('_')[0]", "for i in range(new_track_count): start=max_length*i end=max_length+(max_length*i) new_tracklets[next_track_id] = detections[start:end] next_track_id", "13, 133,] # [ 0,0,1,1] # TODO: Handle is_cut? def", "return obj else: return None tracklets = join_up_final(detections, track_ids) new_objs=[make_object(tracklet)", "num_tracklets = np.max(track_ids) + 1 assert(len(detections) == len(track_ids)) for d,tid", "localizations_by_frame: for l in localizations_by_frame[frame]: l['bgr'] = crop_localization(frame_bgr, l) if", "new_track_count=math.ceil(len(detections)/max_length) for i in range(new_track_count): start=max_length*i end=max_length+(max_length*i) new_tracklets[next_track_id] = detections[start:end]", "len(track) <= 16: continue ext_length = min(length,len(track)) sum_h=0.0 sum_w=0.0 track.sort(key=lambda", "weights_strategy = IoUWeights(media_shape, **strategy['args']) elif strategy['method'] == 'iou-motion': weights_strategy =", "#!/usr/bin/env python3 import argparse import openem import os import cv2", "print(f\"Started {x}\", flush=True) detections, track_ids, pairs, weights, is_cut, constraints =", "{ext_frames}\") tracklets = join_up_iteration(detections,track_ids) tracklets = extend_tracklets(tracklets, ext_frames) detections, track_ids", "frame+=1 else: # The method is analytical on the detections", "\"extension\": {'method' : None}, \"max-length\": {}, \"min-length\": 0} if args.strategy_config:", "IoUMotionWeights(media_shape, **strategy['args']) elif strategy['method'] == 'iou-global-motion': weights_strategy = IoUGlobalMotionWeights(media_shape, media_file,", "False attrs = {} if valid: obj={\"type\": args.tracklet_type_id, \"media_ids\": [int(media_id)],", "for progress in tator.util.download_media(api, media, temp_path): print(f\"Downloading {media_file}, {progress}%...\") print(\"Download", "0 return tracklets def split_tracklets(tracklets): track_ids=[] detections=[] for track_id,track in", "flush=True) if x > 1 and strategy['extension']['method'] == 'linear-motion': ext_frames=x", "= [args.input_version_id] for media_file in args.media_files: comps=os.path.splitext(os.path.basename(media_file))[0] media_id=comps.split('_')[0] media =", "['hybrid', 'iou-global-motion'] api = tator.get_api(args.host, args.token) detection_type = api.get_localization_type(args.detection_type_id) project", "= sum_w / len(track) new_x = min(1,max(0,track[-1]['x']+(vel_x*ext_length))) new_y = min(1,max(0,track[-1]['y']+(vel_y*ext_length)))", "pprint import pprint from collections import defaultdict import yaml import", "localizations = [l.to_dict() for l in localizations] if len(localizations) ==", "media_file]) 
os.remove(temp_path) if strategy['method'] == 'hybrid': # Not all visual", "continue detections.append(l) track_ids.append(track_id) track_id += 1 frame+=1 else: # The", "len(track) >= strategy['min-length']: valid = True attrs = {} else:", "import openem import os import cv2 import numpy as np", "> 0 and min_y > 0: track[-1]['x'] = min_x track[-1]['y']", "track_ids.append(track_id) track_id += 1 frame+=1 else: # The method is", "flush=True) detections, track_ids, pairs, weights, is_cut, constraints = join_tracklets( detections,", "action='store_true') parser.add_argument('media_files', type=str, nargs='*') args = parser.parse_args() # Weight methods", "linear motion, {ext_frames}\") tracklets = join_up_iteration(detections,track_ids) tracklets = extend_tracklets(tracklets, ext_frames)", "to \" f\"something other than 'No'!\") continue media_shape = (media.height,", "methods need detection images vid=cv2.VideoCapture(media_file) ok=True frame = 0 while", "\"pip\", \"install\", pip_package]) print(\"Finished process.\", flush=True) function_name = class_method.get('function',None) classify_args", "api.get_localization_list(project, type=args.detection_type_id, media_id=[media_id], **optional_fetch_args) localizations = [l.to_dict() for l in", "and strategy['extension']['method'] == 'linear-motion': ext_frames=x print(f\"Extending by linear motion, {ext_frames}\")", "parser.add_argument(\"--input-version-id\", type=int) parser.add_argument(\"--strategy-config\", type=str) parser.add_argument(\"--dry-run\", action='store_true') parser.add_argument('media_files', type=str, nargs='*') args", "det['height'] = det.get('orig_h',det['height']) det['orig_x'] = det['x'] det['orig_y'] = det['y'] det['orig_w']", "sys def crop_localization(frame_bgr, localization): img_width = frame_bgr.shape[1] img_height = frame_bgr.shape[0]", "= 0 track[-1]['height'] = 0 min_x = min(track[0]['x'],old_x) min_y =", "IoUWeights(media_shape, **strategy['args']) elif strategy['method'] == 'iou-motion': weights_strategy = IoUMotionWeights(media_shape, **strategy['args'])", "/ len(track) avg_w = sum_w / len(track) new_x = min(1,max(0,track[-1]['x']+(vel_x*ext_length)))", "min(max(abs(old_x-track[0]['x'])+avg_w,0),1) track[0]['height'] = min(max(abs(old_x-track[0]['y'])+avg_h,0),1) else: track[0]['width'] = 0 track[0]['height'] =", "0 and min_y > 0: track[0]['x'] = min(max(0,min_x),1) track[0]['y'] =", "== 'hybrid': model_file = strategy['args']['model_file'] batch_size = strategy['args'].get('batch_size', 4) comparator=FeaturesComparator(model_file)", "0,0,1,1] # TODO: Handle is_cut? 
def join_up_final(detections, track_ids): tracklets =", "with open(args.strategy_config, \"r\") as strategy_file: strategy.update(yaml.load(strategy_file)) else: strategy = default_strategy", "= min(1,max(0,track[0]['x']-(vel_x*ext_length))) old_y = min(1,max(0,track[0]['y']-(vel_y*ext_length))) min_x = min(track[-1]['x'],new_x) min_y =", "detections=[] for track_id,track in tracklets.items(): for d in track: track_ids.append(track_id)", "track[0]['width'] = 0 track[0]['height'] = 0 return tracklets def split_tracklets(tracklets):", "not args.dry_run: for response in tator.util.chunked_create(api.create_state_list,project, state_spec=new_objs): pass try: api.update_media(int(media_id),", "for lid, local in enumerate(localizations): frame = local['frame'] if frame", "in track], **attrs, \"version\": version_id} return obj else: return None", "in zip(detections, track_ids): tracklets[tid].append(d) return tracklets def extend_tracklets(tracklets, length): for", "print(f\"No localizations present in media {media_file}\", flush=True) continue print(f\"Processing {len(localizations)}", "in tracklets.values()] new_objs=[x for x in new_objs if x is", "= join_up_final(detections, track_ids) new_objs=[make_object(tracklet) for tracklet in tracklets.values()] new_objs=[x for", "defaultdict import yaml import math import subprocess import sys def", "media, temp_path): print(f\"Downloading {media_file}, {progress}%...\") print(\"Download finished!\") # Unfrag the", "track_ids): tracklets = defaultdict(list) num_tracklets = np.max(track_ids) + 1 assert(len(detections)", "media_shape = (media.height, media.width) fps = media.fps localizations_by_frame = {}", "track, **classify_args) elif len(track) >= strategy['min-length']: valid = True attrs", "detections.append(l) track_ids.append(track_id) track_id += 1 frame+=1 else: # The method", "p = subprocess.run([sys.executable, \"-m\", \"pip\", \"install\", pip_package]) print(\"Finished process.\", flush=True)", "return detections, track_ids if __name__==\"__main__\": parser = argparse.ArgumentParser(description=__doc__) tator.get_parser(parser) parser.add_argument(\"--detection-type-id\",", "track_ids.append(track_id) track_id += 1 print(\"Loaded all detections\", flush=True) track_ids =", "min_x = min(track[0]['x'],old_x) min_y = min(track[0]['y'],old_y) if min_x > 0", "sum_w=0.0 track.sort(key=lambda x:x['frame']) def restore_det(det): det['x'] = det.get('orig_x',det['x']) det['y'] =", "next_track_id = 1 new_tracklets = {} for track_id,detections in tracklets.items():", "print(f\"Processing {len(localizations)} detections\", flush=True) # Group by localizations by frame", "strategy['method'] == 'hybrid': weights_strategy = HybridWeights(comparator, None, None, media_shape, fps,", "= round(localization['x'] * img_width) box_y = round(localization['y'] * img_height) box_width", "open(f\"/work/{media_id}.json\", \"w\") as f: json.dump(new_objs,f) if not args.dry_run: for response", "det['width'] = det.get('orig_w',det['width']) det['height'] = det.get('orig_h',det['height']) det['orig_x'] = det['x'] det['orig_y']", "(media.height, media.width) fps = media.fps localizations_by_frame = {} localizations =", "localizations_by_frame = {} localizations = api.get_localization_list(project, type=args.detection_type_id, media_id=[media_id], **optional_fetch_args) localizations", "not exist, download it. 
if strategy['method'] == 'iou-global-motion': if not", "to max length of {trim_to}\") detections, track_ids = trim_tracklets(detections, track_ids,", "valid: obj={\"type\": args.tracklet_type_id, \"media_ids\": [int(media_id)], \"localization_ids\": [x['id'] for x in", "0 and min_y > 0: track[-1]['x'] = min_x track[-1]['y'] =", "**attrs, \"version\": version_id} return obj else: return None tracklets =", "= 0 return tracklets def split_tracklets(tracklets): track_ids=[] detections=[] for track_id,track", "min_x > 0 and min_y > 0: track[-1]['x'] = min_x", "old_x = min(1,max(0,track[0]['x']-(vel_x*ext_length))) old_y = min(1,max(0,track[0]['y']-(vel_y*ext_length))) min_x = min(track[-1]['x'],new_x) min_y", "print(f\"frame-diff {x}: {len(detections)} to {len(det_counts_per_track)}\", flush=True) if x > 1", "<= 16: continue ext_length = min(length,len(track)) sum_h=0.0 sum_w=0.0 track.sort(key=lambda x:x['frame'])", "cv2 import numpy as np from openem.tracking import * import", "= min(max(0,abs(new_x-track[-1]['x'])+avg_w),1) track[-1]['height'] = min(max(0,abs(new_x-track[-1]['y'])+avg_h),1) else: track[-1]['width'] = 0 track[-1]['height']", "[l.to_dict() for l in localizations] if len(localizations) == 0: print(f\"No", "if classify_function: valid,attrs = classify_function(media.to_dict(), track, **classify_args) elif len(track) >=", "min(max(0,abs(new_x-track[-1]['y'])+avg_h),1) else: track[-1]['width'] = 0 track[-1]['height'] = 0 min_x =", "track_id += 1 print(\"Loaded all detections\", flush=True) track_ids = renumber_track_ids(track_ids)", "attrs = {} if valid: obj={\"type\": args.tracklet_type_id, \"media_ids\": [int(media_id)], \"localization_ids\":", "the detections coordinates # and does not require processing the", "for d,tid in zip(detections, track_ids): tracklets[tid].append(d) return tracklets def extend_tracklets(tracklets,", "= det['width'] det['orig_h'] = det['height'] restore_det(track[0]) restore_det(track[-1]) for d in", "det['width'] det['orig_h'] = det['height'] restore_det(track[0]) restore_det(track[-1]) for d in track:", "img_height) box_width = round(localization['width'] * img_width) box_height = round(localization['height'] *", "print(\"Finished process.\", flush=True) function_name = class_method.get('function',None) classify_args = class_method.get('args',None) names", "track_ids, pairs, weights, is_cut, constraints = join_tracklets( detections, track_ids, x,", "avg_h = sum_h / len(track) avg_w = sum_w / len(track)", "flush=True) optional_fetch_args = {} if args.input_version_id: optional_fetch_args['version'] = [args.input_version_id] for", "make new track objects based on the result # from", "{\"method\": \"hybrid\", \"frame-diffs\": [1,2,4,8,16,32,64,128,256], \"args\": {}, \"extension\": {'method' : None},", "in localizations_by_frame: for l in localizations_by_frame[frame]: l['bgr'] = crop_localization(frame_bgr, l)", "track_ids) new_objs=[make_object(tracklet) for tracklet in tracklets.values()] new_objs=[x for x in", "import sys def crop_localization(frame_bgr, localization): img_width = frame_bgr.shape[1] img_height =", "in tracklets.items(): for d in track: track_ids.append(track_id) detections.append(d) return detections,track_ids", "and min_y > 0: track[0]['x'] = min(max(0,min_x),1) track[0]['y'] = min(max(0,min_y),1)", "for det in frame_detections: detections.append(det) track_ids.append(track_id) track_id += 1 print(\"Loaded", "if len(track) <= 16: continue ext_length = min(length,len(track)) sum_h=0.0 sum_w=0.0", "min(max(0,min_x),1) track[0]['y'] = 
min(max(0,min_y),1) track[0]['width'] = min(max(abs(old_x-track[0]['x'])+avg_w,0),1) track[0]['height'] = min(max(abs(old_x-track[0]['y'])+avg_h,0),1)", "batch_size = strategy['args'].get('batch_size', 4) comparator=FeaturesComparator(model_file) #extractor=FeaturesExtractor(args.model_file) class_method = strategy.get('class-method',None) classify_function", "det['orig_h'] = det['height'] restore_det(track[0]) restore_det(track[-1]) for d in track: sum_h", "if x is not None] print(f\"New objects = {len(new_objs)}\") with", "0 track[0]['height'] = 0 return tracklets def split_tracklets(tracklets): track_ids=[] detections=[]", "the video for frame,frame_detections in localizations_by_frame.items(): for det in frame_detections:", "= IoUMotionWeights(media_shape, **strategy['args']) elif strategy['method'] == 'iou-global-motion': weights_strategy = IoUGlobalMotionWeights(media_shape,", "# [ 133, 33, 13, 133,] # [ 0,0,1,1] #", "detections.append(d) return detections,track_ids def trim_tracklets(detections, track_ids, max_length): tracklets = join_up_iteration(detections,", "else: # The method is analytical on the detections coordinates", "tracklets = join_up_iteration(detections, track_ids) next_track_id = 1 new_tracklets = {}", "renumber_track_ids(track_ids) return detections, track_ids if __name__==\"__main__\": parser = argparse.ArgumentParser(description=__doc__) tator.get_parser(parser)", "angle,vel,comps = track_vel(track) vel_x = comps[0] vel_y = comps[1] avg_h", "based on the result # from the graph solver #", "round(localization['y'] * img_height) box_width = round(localization['width'] * img_width) box_height =", "tracklets.values()] new_objs=[x for x in new_objs if x is not", "# The method is analytical on the detections coordinates #", "progress in tator.util.download_media(api, media, temp_path): print(f\"Downloading {media_file}, {progress}%...\") print(\"Download finished!\")", "min(length,len(track)) sum_h=0.0 sum_w=0.0 track.sort(key=lambda x:x['frame']) def restore_det(det): det['x'] = det.get('orig_x',det['x'])", "type=int) parser.add_argument(\"--strategy-config\", type=str) parser.add_argument(\"--dry-run\", action='store_true') parser.add_argument('media_files', type=str, nargs='*') args =", "if strategy['method'] == 'hybrid': weights_strategy = HybridWeights(comparator, None, None, media_shape,", "due to \" f\"'Tracklet Generator Processed' attribute being set to", "for track_id,track in tracklets.items(): for d in track: track_ids.append(track_id) detections.append(d)", "'iou-global-motion': if not os.path.exists(media_file): temp_path = f'/tmp/{os.path.basename(media_file)}' for progress in", "# Unfrag the file subprocess.run([\"ffmpeg\", '-i', temp_path, '-c:v', 'copy', media_file])", "det['orig_y'] = det['y'] det['orig_w'] = det['width'] det['orig_h'] = det['height'] restore_det(track[0])", "**strategy['args']) elif strategy['method'] == 'iou-motion': weights_strategy = IoUMotionWeights(media_shape, **strategy['args']) elif", "track_ids.append(track_id) detections.append(d) return detections,track_ids def trim_tracklets(detections, track_ids, max_length): tracklets =", "from openem.tracking import * import json import sys import datetime", "box_height = round(localization['height'] * img_height) img_crop = frame_bgr[box_y:box_y+box_height,box_x:box_x+box_width,:] return img_crop", "import subprocess import sys def crop_localization(frame_bgr, localization): img_width = frame_bgr.shape[1]", "> 1 and strategy['extension']['method'] == 'linear-motion': ext_frames=x 
print(f\"Extending by linear", "continue ext_length = min(length,len(track)) sum_h=0.0 sum_w=0.0 track.sort(key=lambda x:x['frame']) def restore_det(det):", "flush=True) # Group by localizations by frame for lid, local", "[ detection, detection, detection, ...] # [ track#, track#, track#,...]", "Generator Processed' attribute being set to \" f\"something other than", "\"max-length\": {}, \"min-length\": 0} if args.strategy_config: strategy = {**default_strategy} with", "media_file, **strategy['args']) # Generate localization bgr based on grouped localizations", "next_track_id += 1 detections, track_ids = split_tracklets(new_tracklets) track_ids = renumber_track_ids(track_ids)", "module = getattr(module,name) classify_function = getattr(module,names[-1]) print(\"Strategy: \", flush=True) pprint(strategy)", "1 print(\"Loaded all detections\", flush=True) track_ids = renumber_track_ids(track_ids) if strategy['method']", "{x}\", flush=True) detections, track_ids, pairs, weights, is_cut, constraints = join_tracklets(", "is_cut, constraints = join_tracklets( detections, track_ids, x, weights_strategy) if x", "if not args.dry_run: for response in tator.util.chunked_create(api.create_state_list,project, state_spec=new_objs): pass try:", "{len(localizations)} detections\", flush=True) # Group by localizations by frame for", "as np from openem.tracking import * import json import sys", "extend_tracklets(tracklets, length): for track_id,track in tracklets.items(): if len(track) <= 16:", "{} else: valid = False attrs = {} if valid:", "type=int) parser.add_argument(\"--input-version-id\", type=int) parser.add_argument(\"--strategy-config\", type=str) parser.add_argument(\"--dry-run\", action='store_true') parser.add_argument('media_files', type=str, nargs='*')", "elif strategy['method'] == 'iou-motion': weights_strategy = IoUMotionWeights(media_shape, **strategy['args']) elif strategy['method']", "def make_object(track): track.sort(key=lambda x:x['frame']) if classify_function: valid,attrs = classify_function(media.to_dict(), track,", "strategy['method'] == 'hybrid': # Not all visual methods need detection", "localizations_by_frame.items(): for det in frame_detections: detections.append(det) track_ids.append(track_id) track_id += 1", "track_ids, x, weights_strategy) if x in strategy['max-length']: trim_to = strategy['max-length'][x]", "track_id=1 # If media does not exist, download it. 
if", "class_method.get('function',None) classify_args = class_method.get('args',None) names = function_name.split('.') module = __import__(names[0])", "detections, track_ids, pairs, weights, is_cut, constraints = join_tracklets( detections, track_ids,", "tracklets[tid].append(d) return tracklets def extend_tracklets(tracklets, length): for track_id,track in tracklets.items():", "= min(track[0]['x'],old_x) min_y = min(track[0]['y'],old_y) if min_x > 0 and", "= {} if args.input_version_id: optional_fetch_args['version'] = [args.input_version_id] for media_file in", "model_file = strategy['args']['model_file'] batch_size = strategy['args'].get('batch_size', 4) comparator=FeaturesComparator(model_file) #extractor=FeaturesExtractor(args.model_file) class_method", "frame = 0 while ok: ok,frame_bgr = vid.read() if frame", "if __name__==\"__main__\": parser = argparse.ArgumentParser(description=__doc__) tator.get_parser(parser) parser.add_argument(\"--detection-type-id\", type=int, required=True) parser.add_argument(\"--tracklet-type-id\",", "vid=cv2.VideoCapture(media_file) ok=True frame = 0 while ok: ok,frame_bgr = vid.read()", "frame for lid, local in enumerate(localizations): frame = local['frame'] if", "track_ids=[] track_id=1 # If media does not exist, download it.", "print(f\"New objects = {len(new_objs)}\") with open(f\"/work/{media_id}.json\", \"w\") as f: json.dump(new_objs,f)", "track[-1]['x'] = min_x track[-1]['y'] = min_y track[-1]['width'] = min(max(0,abs(new_x-track[-1]['x'])+avg_w),1) track[-1]['height']", "required=True) parser.add_argument(\"--tracklet-type-id\", type=int, required=True) parser.add_argument(\"--version-id\", type=int) parser.add_argument(\"--input-version-id\", type=int) parser.add_argument(\"--strategy-config\", type=str)", "= det.get('orig_h',det['height']) det['orig_x'] = det['x'] det['orig_y'] = det['y'] det['orig_w'] =", "track to max length of {trim_to}\") detections, track_ids = trim_tracklets(detections,", "print(\"Loaded all detections\", flush=True) track_ids = renumber_track_ids(track_ids) if strategy['method'] ==", "trim_to = strategy['max-length'][x] print(f\"Trimming track to max length of {trim_to}\")", "track_id += 1 frame+=1 else: # The method is analytical", "= frame_bgr.shape[0] box_x = round(localization['x'] * img_width) box_y = round(localization['y']", "= args.version_id default_strategy = {\"method\": \"hybrid\", \"frame-diffs\": [1,2,4,8,16,32,64,128,256], \"args\": {},", "Processed' attribute being set to \" f\"something other than 'No'!\")", "tracklets.items(): if len(track) <= 16: continue ext_length = min(length,len(track)) sum_h=0.0", "'-c:v', 'copy', media_file]) os.remove(temp_path) if strategy['method'] == 'hybrid': # Not", "motion, {ext_frames}\") tracklets = join_up_iteration(detections,track_ids) tracklets = extend_tracklets(tracklets, ext_frames) detections,", "import argparse import openem import os import cv2 import numpy", "np.max(track_ids) + 1 assert(len(detections) == len(track_ids)) for d,tid in zip(detections,", "def trim_tracklets(detections, track_ids, max_length): tracklets = join_up_iteration(detections, track_ids) next_track_id =", "= api.get_media(media_id) if media.attributes.get(\"Tracklet Generator Processed\") != \"No\": print(f\"Skipping media", "= {\"method\": \"hybrid\", \"frame-diffs\": [1,2,4,8,16,32,64,128,256], \"args\": {}, \"extension\": {'method' :", "os import cv2 import numpy as np from openem.tracking import", "133,] # [ 0,0,1,1] # TODO: Handle is_cut? 
def join_up_final(detections,", "json.dump(new_objs,f) if not args.dry_run: for response in tator.util.chunked_create(api.create_state_list,project, state_spec=new_objs): pass", "project = detection_type.project version_id = args.version_id default_strategy = {\"method\": \"hybrid\",", "sum_h += d['height'] sum_w += d['width'] angle,vel,comps = track_vel(track) vel_x", "l['bgr'] = crop_localization(frame_bgr, l) if l['attributes']['Confidence'] < 0.50: continue detections.append(l)", "= round(localization['height'] * img_height) img_crop = frame_bgr[box_y:box_y+box_height,box_x:box_x+box_width,:] return img_crop def", "min_x > 0 and min_y > 0: track[0]['x'] = min(max(0,min_x),1)", "= det['height'] restore_det(track[0]) restore_det(track[-1]) for d in track: sum_h +=", "= min(max(0,min_x),1) track[0]['y'] = min(max(0,min_y),1) track[0]['width'] = min(max(abs(old_x-track[0]['x'])+avg_w,0),1) track[0]['height'] =", "= f'/tmp/{os.path.basename(media_file)}' for progress in tator.util.download_media(api, media, temp_path): print(f\"Downloading {media_file},", "optional_fetch_args = {} if args.input_version_id: optional_fetch_args['version'] = [args.input_version_id] for media_file", "[x['id'] for x in track], **attrs, \"version\": version_id} return obj", "min(max(0,abs(new_x-track[-1]['x'])+avg_w),1) track[-1]['height'] = min(max(0,abs(new_x-track[-1]['y'])+avg_h),1) else: track[-1]['width'] = 0 track[-1]['height'] =", "= media.fps localizations_by_frame = {} localizations = api.get_localization_list(project, type=args.detection_type_id, media_id=[media_id],", "local['frame'] if frame in localizations_by_frame: localizations_by_frame[frame].append(local) else: localizations_by_frame[frame] = [local]", "\"args\": {}, \"extension\": {'method' : None}, \"max-length\": {}, \"min-length\": 0}", "16: continue ext_length = min(length,len(track)) sum_h=0.0 sum_w=0.0 track.sort(key=lambda x:x['frame']) def", "trim_to) _,det_counts_per_track=np.unique(track_ids,return_counts=True) print(f\"frame-diff {x}: {len(detections)} to {len(det_counts_per_track)}\", flush=True) if x", "'iou': weights_strategy = IoUWeights(media_shape, **strategy['args']) elif strategy['method'] == 'iou-motion': weights_strategy", "api.update_media(int(media_id), {\"attributes\":{\"Tracklet Generator Processed\": str(datetime.datetime.now())}}) except: print(\"WARNING: Unable to set", "det.get('orig_h',det['height']) det['orig_x'] = det['x'] det['orig_y'] = det['y'] det['orig_w'] = det['width']", "json import sys import datetime import tator from pprint import", "= subprocess.run([sys.executable, \"-m\", \"pip\", \"install\", pip_package]) print(\"Finished process.\", flush=True) function_name", "!= \"No\": print(f\"Skipping media ID {media.id}, name {media.name} due to", "open(args.strategy_config, \"r\") as strategy_file: strategy.update(yaml.load(strategy_file)) else: strategy = default_strategy if", "[1,2,4,8,16,32,64,128,256], \"args\": {}, \"extension\": {'method' : None}, \"max-length\": {}, \"min-length\":", "valid = True attrs = {} else: valid = False", "length): for track_id,track in tracklets.items(): if len(track) <= 16: continue", "+= 1 frame+=1 else: # The method is analytical on", "min(1,max(0,track[-1]['y']+(vel_y*ext_length))) old_x = min(1,max(0,track[0]['x']-(vel_x*ext_length))) old_y = min(1,max(0,track[0]['y']-(vel_y*ext_length))) min_x = min(track[-1]['x'],new_x)", "api.get_media(media_id) if media.attributes.get(\"Tracklet Generator Processed\") != \"No\": print(f\"Skipping media ID", "= track_vel(track) 
vel_x = comps[0] vel_y = comps[1] avg_h =", "ID {media.id}, name {media.name} due to \" f\"'Tracklet Generator Processed'", "= frame_bgr.shape[1] img_height = frame_bgr.shape[0] box_x = round(localization['x'] * img_width)", "import os import cv2 import numpy as np from openem.tracking", "of {trim_to}\") detections, track_ids = trim_tracklets(detections, track_ids, trim_to) _,det_counts_per_track=np.unique(track_ids,return_counts=True) print(f\"frame-diff", "track_ids): tracklets[tid].append(d) return tracklets def make_object(track): track.sort(key=lambda x:x['frame']) if classify_function:", "< 0.50: continue detections.append(l) track_ids.append(track_id) track_id += 1 frame+=1 else:", "does not require processing the video for frame,frame_detections in localizations_by_frame.items():", "'iou-motion', 'iou-global-motion'] # Weight methods that require the video visual_methods", "= np.max(track_ids) + 1 assert(len(detections) == len(track_ids)) for d,tid in", "{} if args.input_version_id: optional_fetch_args['version'] = [args.input_version_id] for media_file in args.media_files:", "comps[0] vel_y = comps[1] avg_h = sum_h / len(track) avg_w", "temp_path = f'/tmp/{os.path.basename(media_file)}' for progress in tator.util.download_media(api, media, temp_path): print(f\"Downloading", "{}, \"extension\": {'method' : None}, \"max-length\": {}, \"min-length\": 0} if", "**optional_fetch_args) localizations = [l.to_dict() for l in localizations] if len(localizations)", "x in track], **attrs, \"version\": version_id} return obj else: return", "detections.append(det) track_ids.append(track_id) track_id += 1 print(\"Loaded all detections\", flush=True) track_ids", "+= d['width'] angle,vel,comps = track_vel(track) vel_x = comps[0] vel_y =", "media_id=[media_id], **optional_fetch_args) localizations = [l.to_dict() for l in localizations] if", "= min(max(0,abs(new_x-track[-1]['y'])+avg_h),1) else: track[-1]['width'] = 0 track[-1]['height'] = 0 min_x", "= min(track[0]['y'],old_y) if min_x > 0 and min_y > 0:", "return tracklets def make_object(track): track.sort(key=lambda x:x['frame']) if classify_function: valid,attrs =", "strategy['max-length'][x] print(f\"Trimming track to max length of {trim_to}\") detections, track_ids", "{**default_strategy} with open(args.strategy_config, \"r\") as strategy_file: strategy.update(yaml.load(strategy_file)) else: strategy =", "detections[start:end] next_track_id += 1 detections, track_ids = split_tracklets(new_tracklets) track_ids =", "if valid: obj={\"type\": args.tracklet_type_id, \"media_ids\": [int(media_id)], \"localization_ids\": [x['id'] for x", "in track: sum_h += d['height'] sum_w += d['width'] angle,vel,comps =", "\"hybrid\", \"frame-diffs\": [1,2,4,8,16,32,64,128,256], \"args\": {}, \"extension\": {'method' : None}, \"max-length\":", "return detections,track_ids def trim_tracklets(detections, track_ids, max_length): tracklets = join_up_iteration(detections, track_ids)", "det['y'] det['orig_w'] = det['width'] det['orig_h'] = det['height'] restore_det(track[0]) restore_det(track[-1]) for", "as strategy_file: strategy.update(yaml.load(strategy_file)) else: strategy = default_strategy if strategy['method'] ==", "= strategy['args']['model_file'] batch_size = strategy['args'].get('batch_size', 4) comparator=FeaturesComparator(model_file) #extractor=FeaturesExtractor(args.model_file) class_method =", "= comps[0] vel_y = comps[1] avg_h = sum_h / len(track)", "min_y track[-1]['width'] = min(max(0,abs(new_x-track[-1]['x'])+avg_w),1) track[-1]['height'] = 
min(max(0,abs(new_x-track[-1]['y'])+avg_h),1) else: track[-1]['width'] =", "= default_strategy if strategy['method'] == 'hybrid': model_file = strategy['args']['model_file'] batch_size", "localizations_by_frame[frame].append(local) else: localizations_by_frame[frame] = [local] detections=[] track_ids=[] track_id=1 # If", "is analytical on the detections coordinates # and does not", "need detection images vid=cv2.VideoCapture(media_file) ok=True frame = 0 while ok:", "strategy.update(yaml.load(strategy_file)) else: strategy = default_strategy if strategy['method'] == 'hybrid': model_file", "batch_size) elif strategy['method'] == 'iou': weights_strategy = IoUWeights(media_shape, **strategy['args']) elif", "min(1,max(0,track[-1]['x']+(vel_x*ext_length))) new_y = min(1,max(0,track[-1]['y']+(vel_y*ext_length))) old_x = min(1,max(0,track[0]['x']-(vel_x*ext_length))) old_y = min(1,max(0,track[0]['y']-(vel_y*ext_length)))", "track[-1]['width'] = min(max(0,abs(new_x-track[-1]['x'])+avg_w),1) track[-1]['height'] = min(max(0,abs(new_x-track[-1]['y'])+avg_h),1) else: track[-1]['width'] = 0", "if len(localizations) == 0: print(f\"No localizations present in media {media_file}\",", "media.attributes.get(\"Tracklet Generator Processed\") != \"No\": print(f\"Skipping media ID {media.id}, name", "enumerate(localizations): frame = local['frame'] if frame in localizations_by_frame: localizations_by_frame[frame].append(local) else:", "args.media_files: comps=os.path.splitext(os.path.basename(media_file))[0] media_id=comps.split('_')[0] media = api.get_media(media_id) if media.attributes.get(\"Tracklet Generator Processed\")", "= api.get_localization_list(project, type=args.detection_type_id, media_id=[media_id], **optional_fetch_args) localizations = [l.to_dict() for l", "# Weight methods that require the video visual_methods = ['hybrid',", "classify_function = getattr(module,names[-1]) print(\"Strategy: \", flush=True) pprint(strategy) print(args.media_files, flush=True) optional_fetch_args", "track_ids = split_tracklets(new_tracklets) track_ids = renumber_track_ids(track_ids) return detections, track_ids if", "the graph solver # [ detection, detection, detection, ...] 
#", "= classify_function(media.to_dict(), track, **classify_args) elif len(track) >= strategy['min-length']: valid =", "l in localizations_by_frame[frame]: l['bgr'] = crop_localization(frame_bgr, l) if l['attributes']['Confidence'] <", "return tracklets def split_tracklets(tracklets): track_ids=[] detections=[] for track_id,track in tracklets.items():", "{} localizations = api.get_localization_list(project, type=args.detection_type_id, media_id=[media_id], **optional_fetch_args) localizations = [l.to_dict()", "track_ids = renumber_track_ids(track_ids) if strategy['method'] == 'hybrid': weights_strategy = HybridWeights(comparator,", "d['height'] sum_w += d['width'] angle,vel,comps = track_vel(track) vel_x = comps[0]", "= det.get('orig_w',det['width']) det['height'] = det.get('orig_h',det['height']) det['orig_x'] = det['x'] det['orig_y'] =", "localizations = api.get_localization_list(project, type=args.detection_type_id, media_id=[media_id], **optional_fetch_args) localizations = [l.to_dict() for", "det.get('orig_w',det['width']) det['height'] = det.get('orig_h',det['height']) det['orig_x'] = det['x'] det['orig_y'] = det['y']", "strategy['args'].get('batch_size', 4) comparator=FeaturesComparator(model_file) #extractor=FeaturesExtractor(args.model_file) class_method = strategy.get('class-method',None) classify_function = None", "track[-1]['height'] = min(max(0,abs(new_x-track[-1]['y'])+avg_h),1) else: track[-1]['width'] = 0 track[-1]['height'] = 0", "function_name = class_method.get('function',None) classify_args = class_method.get('args',None) names = function_name.split('.') module", "track_ids, trim_to) _,det_counts_per_track=np.unique(track_ids,return_counts=True) print(f\"frame-diff {x}: {len(detections)} to {len(det_counts_per_track)}\", flush=True) if", "new_objs if x is not None] print(f\"New objects = {len(new_objs)}\")", "from collections import defaultdict import yaml import math import subprocess", "def restore_det(det): det['x'] = det.get('orig_x',det['x']) det['y'] = det.get('orig_y',det['y']) det['width'] =", "= function_name.split('.') module = __import__(names[0]) for name in names[1:-1]: module", "other than 'No'!\") continue media_shape = (media.height, media.width) fps =", "#extractor=FeaturesExtractor(args.model_file) class_method = strategy.get('class-method',None) classify_function = None classify_args = {}", "visual_methods = ['hybrid', 'iou-global-motion'] api = tator.get_api(args.host, args.token) detection_type =", "{trim_to}\") detections, track_ids = trim_tracklets(detections, track_ids, trim_to) _,det_counts_per_track=np.unique(track_ids,return_counts=True) print(f\"frame-diff {x}:", "detections\", flush=True) track_ids = renumber_track_ids(track_ids) if strategy['method'] == 'hybrid': weights_strategy", "= renumber_track_ids(track_ids) if strategy['method'] == 'hybrid': weights_strategy = HybridWeights(comparator, None,", "\"media_ids\": [int(media_id)], \"localization_ids\": [x['id'] for x in track], **attrs, \"version\":", "methods methods = ['hybrid', 'iou', 'iou-motion', 'iou-global-motion'] # Weight methods", "localization): img_width = frame_bgr.shape[1] img_height = frame_bgr.shape[0] box_x = round(localization['x']", "if min_x > 0 and min_y > 0: track[0]['x'] =", "parser.add_argument(\"--dry-run\", action='store_true') parser.add_argument('media_files', type=str, nargs='*') args = parser.parse_args() # Weight", "in localizations] if len(localizations) == 0: print(f\"No localizations present in", "{media.name} due to \" f\"'Tracklet Generator Processed' 
attribute being set", "localizations_by_frame[frame]: l['bgr'] = crop_localization(frame_bgr, l) if l['attributes']['Confidence'] < 0.50: continue", "**strategy['args']) # Generate localization bgr based on grouped localizations for", "33, 13, 133,] # [ 0,0,1,1] # TODO: Handle is_cut?", "tracklets[tid].append(d) return tracklets def make_object(track): track.sort(key=lambda x:x['frame']) if classify_function: valid,attrs", "track[0]['height'] = min(max(abs(old_x-track[0]['y'])+avg_h,0),1) else: track[0]['width'] = 0 track[0]['height'] = 0", "with open(f\"/work/{media_id}.json\", \"w\") as f: json.dump(new_objs,f) if not args.dry_run: for", "img_crop def join_up_iteration(detections, track_ids): tracklets = defaultdict(list) num_tracklets = np.max(track_ids)", "None, None, media_shape, fps, 0.0, batch_size) elif strategy['method'] == 'iou':", "optional_fetch_args['version'] = [args.input_version_id] for media_file in args.media_files: comps=os.path.splitext(os.path.basename(media_file))[0] media_id=comps.split('_')[0] media", "valid,attrs = classify_function(media.to_dict(), track, **classify_args) elif len(track) >= strategy['min-length']: valid", "0 while ok: ok,frame_bgr = vid.read() if frame in localizations_by_frame:", "If media does not exist, download it. if strategy['method'] ==", "* img_width) box_y = round(localization['y'] * img_height) box_width = round(localization['width']", "api.get_localization_type(args.detection_type_id) project = detection_type.project version_id = args.version_id default_strategy = {\"method\":", "strategy.get('class-method',None) classify_function = None classify_args = {} if class_method: pip_package=class_method.get('pip',None)", "ok,frame_bgr = vid.read() if frame in localizations_by_frame: for l in", "= {} if valid: obj={\"type\": args.tracklet_type_id, \"media_ids\": [int(media_id)], \"localization_ids\": [x['id']", "{progress}%...\") print(\"Download finished!\") # Unfrag the file subprocess.run([\"ffmpeg\", '-i', temp_path,", "if l['attributes']['Confidence'] < 0.50: continue detections.append(l) track_ids.append(track_id) track_id += 1", "strategy['method'] == 'iou-global-motion': if not os.path.exists(media_file): temp_path = f'/tmp/{os.path.basename(media_file)}' for", "box_y = round(localization['y'] * img_height) box_width = round(localization['width'] * img_width)", "{len(det_counts_per_track)}\", flush=True) if x > 1 and strategy['extension']['method'] == 'linear-motion':", "api = tator.get_api(args.host, args.token) detection_type = api.get_localization_type(args.detection_type_id) project = detection_type.project", "track[-1]['height'] = 0 min_x = min(track[0]['x'],old_x) min_y = min(track[0]['y'],old_y) if", "Group by localizations by frame for lid, local in enumerate(localizations):", "in zip(detections, track_ids): tracklets[tid].append(d) return tracklets def make_object(track): track.sort(key=lambda x:x['frame'])", "length of {trim_to}\") detections, track_ids = trim_tracklets(detections, track_ids, trim_to) _,det_counts_per_track=np.unique(track_ids,return_counts=True)", "comps[1] avg_h = sum_h / len(track) avg_w = sum_w /", "= round(localization['y'] * img_height) box_width = round(localization['width'] * img_width) box_height", "getattr(module,name) classify_function = getattr(module,names[-1]) print(\"Strategy: \", flush=True) pprint(strategy) print(args.media_files, flush=True)", "attribute being set to \" f\"something other than 'No'!\") continue", "l) if l['attributes']['Confidence'] < 0.50: continue detections.append(l) 
track_ids.append(track_id) track_id +=", "def crop_localization(frame_bgr, localization): img_width = frame_bgr.shape[1] img_height = frame_bgr.shape[0] box_x", "the file subprocess.run([\"ffmpeg\", '-i', temp_path, '-c:v', 'copy', media_file]) os.remove(temp_path) if", "new_y = min(1,max(0,track[-1]['y']+(vel_y*ext_length))) old_x = min(1,max(0,track[0]['x']-(vel_x*ext_length))) old_y = min(1,max(0,track[0]['y']-(vel_y*ext_length))) min_x", "pass try: api.update_media(int(media_id), {\"attributes\":{\"Tracklet Generator Processed\": str(datetime.datetime.now())}}) except: print(\"WARNING: Unable", "strategy['args']['model_file'] batch_size = strategy['args'].get('batch_size', 4) comparator=FeaturesComparator(model_file) #extractor=FeaturesExtractor(args.model_file) class_method = strategy.get('class-method',None)", "frame,frame_detections in localizations_by_frame.items(): for det in frame_detections: detections.append(det) track_ids.append(track_id) track_id", "classify_args = class_method.get('args',None) names = function_name.split('.') module = __import__(names[0]) for", "'hybrid': # Not all visual methods need detection images vid=cv2.VideoCapture(media_file)", "by localizations by frame for lid, local in enumerate(localizations): frame", "media_shape, fps, 0.0, batch_size) elif strategy['method'] == 'iou': weights_strategy =", "for media_file in args.media_files: comps=os.path.splitext(os.path.basename(media_file))[0] media_id=comps.split('_')[0] media = api.get_media(media_id) if", "> 0: track[-1]['x'] = min_x track[-1]['y'] = min_y track[-1]['width'] =", "on grouped localizations for x in strategy['frame-diffs']: print(f\"Started {x}\", flush=True)", "== 'linear-motion': ext_frames=x print(f\"Extending by linear motion, {ext_frames}\") tracklets =", "attrs = {} else: valid = False attrs = {}", "img_width) box_y = round(localization['y'] * img_height) box_width = round(localization['width'] *", "frame_bgr.shape[1] img_height = frame_bgr.shape[0] box_x = round(localization['x'] * img_width) box_y", "= min(max(0,min_y),1) track[0]['width'] = min(max(abs(old_x-track[0]['x'])+avg_w,0),1) track[0]['height'] = min(max(abs(old_x-track[0]['y'])+avg_h,0),1) else: track[0]['width']", "state_spec=new_objs): pass try: api.update_media(int(media_id), {\"attributes\":{\"Tracklet Generator Processed\": str(datetime.datetime.now())}}) except: print(\"WARNING:", "Processed\": str(datetime.datetime.now())}}) except: print(\"WARNING: Unable to set 'Tracklet Generator Processed'", "The method is analytical on the detections coordinates # and", "== 'iou-motion': weights_strategy = IoUMotionWeights(media_shape, **strategy['args']) elif strategy['method'] == 'iou-global-motion':", "default_strategy if strategy['method'] == 'hybrid': model_file = strategy['args']['model_file'] batch_size =", "min(max(abs(old_x-track[0]['y'])+avg_h,0),1) else: track[0]['width'] = 0 track[0]['height'] = 0 return tracklets", "\", flush=True) pprint(strategy) print(args.media_files, flush=True) optional_fetch_args = {} if args.input_version_id:", "min(max(0,min_y),1) track[0]['width'] = min(max(abs(old_x-track[0]['x'])+avg_w,0),1) track[0]['height'] = min(max(abs(old_x-track[0]['y'])+avg_h,0),1) else: track[0]['width'] =", "0 track[-1]['height'] = 0 min_x = min(track[0]['x'],old_x) min_y = min(track[0]['y'],old_y)", "def join_up_iteration(detections, track_ids): tracklets = defaultdict(list) num_tracklets = np.max(track_ids) +", "= det.get('orig_y',det['y']) det['width'] = det.get('orig_w',det['width']) det['height'] = 
det.get('orig_h',det['height']) det['orig_x'] =", "= strategy.get('class-method',None) classify_function = None classify_args = {} if class_method:", "objects based on the result # from the graph solver", "# [ track#, track#, track#,...] # [ 133, 33, 13,", "'hybrid': model_file = strategy['args']['model_file'] batch_size = strategy['args'].get('batch_size', 4) comparator=FeaturesComparator(model_file) #extractor=FeaturesExtractor(args.model_file)", "print(f\"Extending by linear motion, {ext_frames}\") tracklets = join_up_iteration(detections,track_ids) tracklets =", "if strategy['method'] == 'hybrid': model_file = strategy['args']['model_file'] batch_size = strategy['args'].get('batch_size',", "min(track[-1]['x'],new_x) min_y = min(track[-1]['y'],new_y) if min_x > 0 and min_y", "extend_tracklets(tracklets, ext_frames) detections, track_ids = split_tracklets(tracklets) # Now we make", "bgr based on grouped localizations for x in strategy['frame-diffs']: print(f\"Started", "media_file in args.media_files: comps=os.path.splitext(os.path.basename(media_file))[0] media_id=comps.split('_')[0] media = api.get_media(media_id) if media.attributes.get(\"Tracklet", "= [local] detections=[] track_ids=[] track_id=1 # If media does not", "to \" f\"'Tracklet Generator Processed' attribute being set to \"", "crop_localization(frame_bgr, localization): img_width = frame_bgr.shape[1] img_height = frame_bgr.shape[0] box_x =", "{len(detections)} to {len(det_counts_per_track)}\", flush=True) if x > 1 and strategy['extension']['method']", "to {len(det_counts_per_track)}\", flush=True) if x > 1 and strategy['extension']['method'] ==", "= min(track[-1]['x'],new_x) min_y = min(track[-1]['y'],new_y) if min_x > 0 and", "vel_y = comps[1] avg_h = sum_h / len(track) avg_w =", "= det['y'] det['orig_w'] = det['width'] det['orig_h'] = det['height'] restore_det(track[0]) restore_det(track[-1])", "parser.add_argument(\"--strategy-config\", type=str) parser.add_argument(\"--dry-run\", action='store_true') parser.add_argument('media_files', type=str, nargs='*') args = parser.parse_args()", "in localizations_by_frame: localizations_by_frame[frame].append(local) else: localizations_by_frame[frame] = [local] detections=[] track_ids=[] track_id=1", "'iou', 'iou-motion', 'iou-global-motion'] # Weight methods that require the video", "if args.input_version_id: optional_fetch_args['version'] = [args.input_version_id] for media_file in args.media_files: comps=os.path.splitext(os.path.basename(media_file))[0]", "min(1,max(0,track[0]['x']-(vel_x*ext_length))) old_y = min(1,max(0,track[0]['y']-(vel_y*ext_length))) min_x = min(track[-1]['x'],new_x) min_y = min(track[-1]['y'],new_y)", "in strategy['frame-diffs']: print(f\"Started {x}\", flush=True) detections, track_ids, pairs, weights, is_cut,", "= 0 track[0]['height'] = 0 return tracklets def split_tracklets(tracklets): track_ids=[]", "names[1:-1]: module = getattr(module,name) classify_function = getattr(module,names[-1]) print(\"Strategy: \", flush=True)", "None classify_args = {} if class_method: pip_package=class_method.get('pip',None) if pip_package: p", "in media {media_file}\", flush=True) continue print(f\"Processing {len(localizations)} detections\", flush=True) #", "tracklets = defaultdict(list) num_tracklets = np.max(track_ids) + 1 assert(len(detections) ==", "= {} localizations = api.get_localization_list(project, type=args.detection_type_id, media_id=[media_id], **optional_fetch_args) localizations =", "len(track) avg_w = sum_w / len(track) new_x = 
min(1,max(0,track[-1]['x']+(vel_x*ext_length))) new_y", "min(1,max(0,track[0]['y']-(vel_y*ext_length))) min_x = min(track[-1]['x'],new_x) min_y = min(track[-1]['y'],new_y) if min_x >", "in strategy['max-length']: trim_to = strategy['max-length'][x] print(f\"Trimming track to max length", "in frame_detections: detections.append(det) track_ids.append(track_id) track_id += 1 print(\"Loaded all detections\",", "min(track[-1]['y'],new_y) if min_x > 0 and min_y > 0: track[-1]['x']", "== 'iou-global-motion': weights_strategy = IoUGlobalMotionWeights(media_shape, media_file, **strategy['args']) # Generate localization", "for l in localizations] if len(localizations) == 0: print(f\"No localizations", "tator.get_parser(parser) parser.add_argument(\"--detection-type-id\", type=int, required=True) parser.add_argument(\"--tracklet-type-id\", type=int, required=True) parser.add_argument(\"--version-id\", type=int) parser.add_argument(\"--input-version-id\",", "= min(max(abs(old_x-track[0]['x'])+avg_w,0),1) track[0]['height'] = min(max(abs(old_x-track[0]['y'])+avg_h,0),1) else: track[0]['width'] = 0 track[0]['height']", "else: valid = False attrs = {} if valid: obj={\"type\":", "{\"attributes\":{\"Tracklet Generator Processed\": str(datetime.datetime.now())}}) except: print(\"WARNING: Unable to set 'Tracklet", "import datetime import tator from pprint import pprint from collections", "args.dry_run: for response in tator.util.chunked_create(api.create_state_list,project, state_spec=new_objs): pass try: api.update_media(int(media_id), {\"attributes\":{\"Tracklet", "ext_length = min(length,len(track)) sum_h=0.0 sum_w=0.0 track.sort(key=lambda x:x['frame']) def restore_det(det): det['x']", "{media_file}, {progress}%...\") print(\"Download finished!\") # Unfrag the file subprocess.run([\"ffmpeg\", '-i',", "# Group by localizations by frame for lid, local in", "comparator=FeaturesComparator(model_file) #extractor=FeaturesExtractor(args.model_file) class_method = strategy.get('class-method',None) classify_function = None classify_args =", "* import json import sys import datetime import tator from", "img_height) img_crop = frame_bgr[box_y:box_y+box_height,box_x:box_x+box_width,:] return img_crop def join_up_iteration(detections, track_ids): tracklets", "parser.add_argument(\"--version-id\", type=int) parser.add_argument(\"--input-version-id\", type=int) parser.add_argument(\"--strategy-config\", type=str) parser.add_argument(\"--dry-run\", action='store_true') parser.add_argument('media_files', type=str,", "return None tracklets = join_up_final(detections, track_ids) new_objs=[make_object(tracklet) for tracklet in", "round(localization['x'] * img_width) box_y = round(localization['y'] * img_height) box_width =", "methods that require the video visual_methods = ['hybrid', 'iou-global-motion'] api", "print(f\"Downloading {media_file}, {progress}%...\") print(\"Download finished!\") # Unfrag the file subprocess.run([\"ffmpeg\",", "from the graph solver # [ detection, detection, detection, ...]", "if class_method: pip_package=class_method.get('pip',None) if pip_package: p = subprocess.run([sys.executable, \"-m\", \"pip\",", "if frame in localizations_by_frame: for l in localizations_by_frame[frame]: l['bgr'] =", "1 assert(len(detections) == len(track_ids)) for d,tid in zip(detections, track_ids): tracklets[tid].append(d)", "require the video visual_methods = ['hybrid', 'iou-global-motion'] api = tator.get_api(args.host,", "track: sum_h += d['height'] sum_w += d['width'] angle,vel,comps = track_vel(track)", "= 
['hybrid', 'iou', 'iou-motion', 'iou-global-motion'] # Weight methods that require", "= {} else: valid = False attrs = {} if", "= split_tracklets(new_tracklets) track_ids = renumber_track_ids(track_ids) return detections, track_ids if __name__==\"__main__\":", "Generator Processed\") != \"No\": print(f\"Skipping media ID {media.id}, name {media.name}", "import pprint from collections import defaultdict import yaml import math", "= det['x'] det['orig_y'] = det['y'] det['orig_w'] = det['width'] det['orig_h'] =", "== 'hybrid': weights_strategy = HybridWeights(comparator, None, None, media_shape, fps, 0.0,", "= extend_tracklets(tracklets, ext_frames) detections, track_ids = split_tracklets(tracklets) # Now we", "pip_package]) print(\"Finished process.\", flush=True) function_name = class_method.get('function',None) classify_args = class_method.get('args',None)", "vid.read() if frame in localizations_by_frame: for l in localizations_by_frame[frame]: l['bgr']", "grouped localizations for x in strategy['frame-diffs']: print(f\"Started {x}\", flush=True) detections,", "else: localizations_by_frame[frame] = [local] detections=[] track_ids=[] track_id=1 # If media", "round(localization['width'] * img_width) box_height = round(localization['height'] * img_height) img_crop =", "\"w\") as f: json.dump(new_objs,f) if not args.dry_run: for response in", "det['height'] restore_det(track[0]) restore_det(track[-1]) for d in track: sum_h += d['height']", "= min_y track[-1]['width'] = min(max(0,abs(new_x-track[-1]['x'])+avg_w),1) track[-1]['height'] = min(max(0,abs(new_x-track[-1]['y'])+avg_h),1) else: track[-1]['width']", "args.token) detection_type = api.get_localization_type(args.detection_type_id) project = detection_type.project version_id = args.version_id", "video for frame,frame_detections in localizations_by_frame.items(): for det in frame_detections: detections.append(det)", "...] # [ track#, track#, track#,...] # [ 133, 33,", "args.version_id default_strategy = {\"method\": \"hybrid\", \"frame-diffs\": [1,2,4,8,16,32,64,128,256], \"args\": {}, \"extension\":", "IoUGlobalMotionWeights(media_shape, media_file, **strategy['args']) # Generate localization bgr based on grouped", "download it. 
if strategy['method'] == 'iou-global-motion': if not os.path.exists(media_file): temp_path", "x > 1 and strategy['extension']['method'] == 'linear-motion': ext_frames=x print(f\"Extending by", "for track_id,track in tracklets.items(): if len(track) <= 16: continue ext_length", "min(track[0]['y'],old_y) if min_x > 0 and min_y > 0: track[0]['x']", "vel_x = comps[0] vel_y = comps[1] avg_h = sum_h /", "tracklets def split_tracklets(tracklets): track_ids=[] detections=[] for track_id,track in tracklets.items(): for", "video visual_methods = ['hybrid', 'iou-global-motion'] api = tator.get_api(args.host, args.token) detection_type", "lid, local in enumerate(localizations): frame = local['frame'] if frame in", "**classify_args) elif len(track) >= strategy['min-length']: valid = True attrs =", "Processed\") != \"No\": print(f\"Skipping media ID {media.id}, name {media.name} due", "for d in track: sum_h += d['height'] sum_w += d['width']", "133, 33, 13, 133,] # [ 0,0,1,1] # TODO: Handle", "getattr(module,names[-1]) print(\"Strategy: \", flush=True) pprint(strategy) print(args.media_files, flush=True) optional_fetch_args = {}", "comps=os.path.splitext(os.path.basename(media_file))[0] media_id=comps.split('_')[0] media = api.get_media(media_id) if media.attributes.get(\"Tracklet Generator Processed\") !=", "type=args.detection_type_id, media_id=[media_id], **optional_fetch_args) localizations = [l.to_dict() for l in localizations]", "python3 import argparse import openem import os import cv2 import", "None] print(f\"New objects = {len(new_objs)}\") with open(f\"/work/{media_id}.json\", \"w\") as f:", "Generator Processed\": str(datetime.datetime.now())}}) except: print(\"WARNING: Unable to set 'Tracklet Generator", "localizations_by_frame[frame] = [local] detections=[] track_ids=[] track_id=1 # If media does", "tracklets def extend_tracklets(tracklets, length): for track_id,track in tracklets.items(): if len(track)", "os.remove(temp_path) if strategy['method'] == 'hybrid': # Not all visual methods", "# from the graph solver # [ detection, detection, detection,", "img_width = frame_bgr.shape[1] img_height = frame_bgr.shape[0] box_x = round(localization['x'] *", "track_ids = split_tracklets(tracklets) # Now we make new track objects", "min_y = min(track[-1]['y'],new_y) if min_x > 0 and min_y >", "split_tracklets(tracklets) # Now we make new track objects based on", "make_object(track): track.sort(key=lambda x:x['frame']) if classify_function: valid,attrs = classify_function(media.to_dict(), track, **classify_args)", "= detections[start:end] next_track_id += 1 detections, track_ids = split_tracklets(new_tracklets) track_ids", "media_id=comps.split('_')[0] media = api.get_media(media_id) if media.attributes.get(\"Tracklet Generator Processed\") != \"No\":", "= {} if class_method: pip_package=class_method.get('pip',None) if pip_package: p = subprocess.run([sys.executable,", "it. if strategy['method'] == 'iou-global-motion': if not os.path.exists(media_file): temp_path =", "detections, track_ids = split_tracklets(tracklets) # Now we make new track", "strategy['min-length']: valid = True attrs = {} else: valid =", "[ 0,0,1,1] # TODO: Handle is_cut? def join_up_final(detections, track_ids): tracklets", "in tracklets.items(): if len(track) <= 16: continue ext_length = min(length,len(track))", "= True attrs = {} else: valid = False attrs", "track#, track#, track#,...] 
# [ 133, 33, 13, 133,] #", "else: return None tracklets = join_up_final(detections, track_ids) new_objs=[make_object(tracklet) for tracklet", "import cv2 import numpy as np from openem.tracking import *", "= 0 while ok: ok,frame_bgr = vid.read() if frame in", "type=int, required=True) parser.add_argument(\"--version-id\", type=int) parser.add_argument(\"--input-version-id\", type=int) parser.add_argument(\"--strategy-config\", type=str) parser.add_argument(\"--dry-run\", action='store_true')", "= HybridWeights(comparator, None, None, media_shape, fps, 0.0, batch_size) elif strategy['method']", "for response in tator.util.chunked_create(api.create_state_list,project, state_spec=new_objs): pass try: api.update_media(int(media_id), {\"attributes\":{\"Tracklet Generator", "from pprint import pprint from collections import defaultdict import yaml", "fps, 0.0, batch_size) elif strategy['method'] == 'iou': weights_strategy = IoUWeights(media_shape,", "math import subprocess import sys def crop_localization(frame_bgr, localization): img_width =", "= defaultdict(list) num_tracklets = np.max(track_ids) + 1 assert(len(detections) == len(track_ids))", "function_name.split('.') module = __import__(names[0]) for name in names[1:-1]: module =", "0.0, batch_size) elif strategy['method'] == 'iou': weights_strategy = IoUWeights(media_shape, **strategy['args'])", "track], **attrs, \"version\": version_id} return obj else: return None tracklets", "track#, track#,...] # [ 133, 33, 13, 133,] # [", "tracklets def make_object(track): track.sort(key=lambda x:x['frame']) if classify_function: valid,attrs = classify_function(media.to_dict(),", "0: track[0]['x'] = min(max(0,min_x),1) track[0]['y'] = min(max(0,min_y),1) track[0]['width'] = min(max(abs(old_x-track[0]['x'])+avg_w,0),1)", "detections, track_ids if __name__==\"__main__\": parser = argparse.ArgumentParser(description=__doc__) tator.get_parser(parser) parser.add_argument(\"--detection-type-id\", type=int,", "\"frame-diffs\": [1,2,4,8,16,32,64,128,256], \"args\": {}, \"extension\": {'method' : None}, \"max-length\": {},", "{media_file}\", flush=True) continue print(f\"Processing {len(localizations)} detections\", flush=True) # Group by", "\"install\", pip_package]) print(\"Finished process.\", flush=True) function_name = class_method.get('function',None) classify_args =", "class_method = strategy.get('class-method',None) classify_function = None classify_args = {} if", "* img_height) img_crop = frame_bgr[box_y:box_y+box_height,box_x:box_x+box_width,:] return img_crop def join_up_iteration(detections, track_ids):", "continue print(f\"Processing {len(localizations)} detections\", flush=True) # Group by localizations by", "strategy['method'] == 'iou-global-motion': weights_strategy = IoUGlobalMotionWeights(media_shape, media_file, **strategy['args']) # Generate", "'copy', media_file]) os.remove(temp_path) if strategy['method'] == 'hybrid': # Not all", "is not None] print(f\"New objects = {len(new_objs)}\") with open(f\"/work/{media_id}.json\", \"w\")", "flush=True) continue print(f\"Processing {len(localizations)} detections\", flush=True) # Group by localizations", "obj else: return None tracklets = join_up_final(detections, track_ids) new_objs=[make_object(tracklet) for", "temp_path): print(f\"Downloading {media_file}, {progress}%...\") print(\"Download finished!\") # Unfrag the file", "type=str, nargs='*') args = parser.parse_args() # Weight methods methods =", "'-i', temp_path, '-c:v', 'copy', media_file]) os.remove(temp_path) if strategy['method'] == 
'hybrid':", "if pip_package: p = subprocess.run([sys.executable, \"-m\", \"pip\", \"install\", pip_package]) print(\"Finished", "'linear-motion': ext_frames=x print(f\"Extending by linear motion, {ext_frames}\") tracklets = join_up_iteration(detections,track_ids)", "track[-1]['y'] = min_y track[-1]['width'] = min(max(0,abs(new_x-track[-1]['x'])+avg_w),1) track[-1]['height'] = min(max(0,abs(new_x-track[-1]['y'])+avg_h),1) else:", "media = api.get_media(media_id) if media.attributes.get(\"Tracklet Generator Processed\") != \"No\": print(f\"Skipping", "1 frame+=1 else: # The method is analytical on the", "= det.get('orig_x',det['x']) det['y'] = det.get('orig_y',det['y']) det['width'] = det.get('orig_w',det['width']) det['height'] =", "join_up_iteration(detections, track_ids) next_track_id = 1 new_tracklets = {} for track_id,detections", "= parser.parse_args() # Weight methods methods = ['hybrid', 'iou', 'iou-motion',", "detection, detection, detection, ...] # [ track#, track#, track#,...] #", "= __import__(names[0]) for name in names[1:-1]: module = getattr(module,name) classify_function", "np from openem.tracking import * import json import sys import", "media.fps localizations_by_frame = {} localizations = api.get_localization_list(project, type=args.detection_type_id, media_id=[media_id], **optional_fetch_args)", "old_y = min(1,max(0,track[0]['y']-(vel_y*ext_length))) min_x = min(track[-1]['x'],new_x) min_y = min(track[-1]['y'],new_y) if", "== len(track_ids)) for d,tid in zip(detections, track_ids): tracklets[tid].append(d) return tracklets", "else: track[0]['width'] = 0 track[0]['height'] = 0 return tracklets def", "min_x = min(track[-1]['x'],new_x) min_y = min(track[-1]['y'],new_y) if min_x > 0", "\"-m\", \"pip\", \"install\", pip_package]) print(\"Finished process.\", flush=True) function_name = class_method.get('function',None)", "f\"something other than 'No'!\") continue media_shape = (media.height, media.width) fps", "track_ids) next_track_id = 1 new_tracklets = {} for track_id,detections in", "{len(new_objs)}\") with open(f\"/work/{media_id}.json\", \"w\") as f: json.dump(new_objs,f) if not args.dry_run:", "solver # [ detection, detection, detection, ...] 
#!/usr/bin/env python3
# Source: openem-team/openem tracklet generation script, reassembled from the
# shredded fragments above.

import argparse
import openem
import os
import cv2
import numpy as np
from openem.tracking import *
import json
import sys
import datetime
import tator
from pprint import pprint
from collections import defaultdict
import yaml
import math
import subprocess

def crop_localization(frame_bgr, localization):
    # Convert normalized box coordinates to pixel coordinates and crop.
    img_width = frame_bgr.shape[1]
    img_height = frame_bgr.shape[0]
    box_x = round(localization['x'] * img_width)
    box_y = round(localization['y'] * img_height)
    box_width = round(localization['width'] * img_width)
    box_height = round(localization['height'] * img_height)
    img_crop = frame_bgr[box_y:box_y+box_height,box_x:box_x+box_width,:]
    return img_crop

def join_up_iteration(detections, track_ids):
    # Group flat detection/track-id lists into per-track lists.
    tracklets = defaultdict(list)
    num_tracklets = np.max(track_ids) + 1
    assert(len(detections) == len(track_ids))
    for d,tid in zip(detections, track_ids):
        tracklets[tid].append(d)
    return tracklets

def extend_tracklets(tracklets, length):
    # Extend each sufficiently long tracklet forward and backward along its
    # estimated linear motion, widening the endpoint boxes to cover the gap.
    for track_id,track in tracklets.items():
        if len(track) <= 16:
            continue

        ext_length = min(length,len(track))
        sum_h=0.0
        sum_w=0.0

        track.sort(key=lambda x:x['frame'])

        def restore_det(det):
            det['x'] = det.get('orig_x',det['x'])
            det['y'] = det.get('orig_y',det['y'])
            det['width'] = det.get('orig_w',det['width'])
            det['height'] = det.get('orig_h',det['height'])
            det['orig_x'] = det['x']
            det['orig_y'] = det['y']
            det['orig_w'] = det['width']
            det['orig_h'] = det['height']
        restore_det(track[0])
        restore_det(track[-1])

        for d in track:
            sum_h += d['height']
            sum_w += d['width']

        angle,vel,comps = track_vel(track)
        vel_x = comps[0]
        vel_y = comps[1]
        avg_h = sum_h / len(track)
        avg_w = sum_w / len(track)
        new_x = min(1,max(0,track[-1]['x']+(vel_x*ext_length)))
        new_y = min(1,max(0,track[-1]['y']+(vel_y*ext_length)))
        old_x = min(1,max(0,track[0]['x']-(vel_x*ext_length)))
        old_y = min(1,max(0,track[0]['y']-(vel_y*ext_length)))

        min_x = min(track[-1]['x'],new_x)
        min_y = min(track[-1]['y'],new_y)
        if min_x > 0 and min_y > 0:
            track[-1]['x'] = min_x
            track[-1]['y'] = min_y
            track[-1]['width'] = min(max(0,abs(new_x-track[-1]['x'])+avg_w),1)
            track[-1]['height'] = min(max(0,abs(new_y-track[-1]['y'])+avg_h),1)
        else:
            track[-1]['width'] = 0
            track[-1]['height'] = 0

        min_x = min(track[0]['x'],old_x)
        min_y = min(track[0]['y'],old_y)
        if min_x > 0 and min_y > 0:
            track[0]['x'] = min(max(0,min_x),1)
            track[0]['y'] = min(max(0,min_y),1)
            track[0]['width'] = min(max(abs(old_x-track[0]['x'])+avg_w,0),1)
            track[0]['height'] = min(max(abs(old_x-track[0]['y'])+avg_h,0),1)
        else:
            track[0]['width'] = 0
            track[0]['height'] = 0
    return tracklets

def split_tracklets(tracklets):
    # Inverse of join_up_iteration: flatten per-track lists back out.
    track_ids=[]
    detections=[]
    for track_id,track in tracklets.items():
        for d in track:
            track_ids.append(track_id)
            detections.append(d)
    return detections,track_ids

def trim_tracklets(detections, track_ids, max_length):
    # Split any track longer than max_length into consecutive chunks.
    tracklets = join_up_iteration(detections, track_ids)
    next_track_id = 1
    new_tracklets = {}
    for track_id,detections in tracklets.items():
        new_track_count=math.ceil(len(detections)/max_length)
        for i in range(new_track_count):
            start=max_length*i
            end=max_length+(max_length*i)
            new_tracklets[next_track_id] = detections[start:end]
            next_track_id += 1
    detections, track_ids = split_tracklets(new_tracklets)
    track_ids = renumber_track_ids(track_ids)
    return detections, track_ids

if __name__=="__main__":
    parser = argparse.ArgumentParser(description=__doc__)
    tator.get_parser(parser)
    parser.add_argument("--detection-type-id", type=int, required=True)
    parser.add_argument("--tracklet-type-id", type=int, required=True)
    parser.add_argument("--version-id", type=int)
    parser.add_argument("--input-version-id", type=int)
    parser.add_argument("--strategy-config", type=str)
    parser.add_argument("--dry-run", action='store_true')
    parser.add_argument('media_files', type=str, nargs='*')
    args = parser.parse_args()

    # Weight methods
    methods = ['hybrid', 'iou', 'iou-motion', 'iou-global-motion']
    # Weight methods that require the video

    api = tator.get_api(args.host, args.token)
    detection_type = api.get_localization_type(args.detection_type_id)
    project = detection_type.project
    version_id = args.version_id

    default_strategy = {"method": "hybrid",
                        "frame-diffs": [1,2,4,8,16,32,64,128,256],
                        "args": {},
                        "extension": {'method' : None},
                        "max-length": {},
                        "min-length": 0}

    if args.strategy_config:
        strategy = {**default_strategy}
        with open(args.strategy_config, "r") as strategy_file:
            strategy.update(yaml.safe_load(strategy_file))
    else:
        strategy = default_strategy

    if strategy['method'] == 'hybrid':
        model_file = strategy['args']['model_file']
        batch_size = strategy['args'].get('batch_size', 4)
        comparator=FeaturesComparator(model_file)
        #extractor=FeaturesExtractor(args.model_file)

    class_method = strategy.get('class-method',None)
    classify_function = None
    classify_args = {}
    if class_method:
        pip_package=class_method.get('pip',None)
        if pip_package:
            p = subprocess.run([sys.executable, "-m", "pip", "install", pip_package])
            print("Finished process.", flush=True)
        function_name = class_method.get('function',None)
        classify_args = class_method.get('args',None)
        names = function_name.split('.')
        module = __import__(names[0])
        for name in names[1:-1]:
            module = getattr(module,name)
        classify_function = getattr(module,names[-1])

    print("Strategy: ", flush=True)
    pprint(strategy)
    print(args.media_files, flush=True)

    optional_fetch_args = {}
    if args.input_version_id:
        optional_fetch_args['version'] = [args.input_version_id]

    for media_file in args.media_files:
        comps=os.path.splitext(os.path.basename(media_file))[0]
        media_id=comps.split('_')[0]
        media = api.get_media(media_id)
        if media.attributes.get("Tracklet Generator Processed") != "No":
            print(f"Skipping media ID {media.id}, name {media.name} due to "
                  f"'Tracklet Generator Processed' attribute being set to "
                  f"something other than 'No'!")
            continue
        media_shape = (media.height, media.width)
        fps = media.fps

        localizations_by_frame = {}
        localizations = api.get_localization_list(project,
                                                  type=args.detection_type_id,
                                                  media_id=[media.id],
                                                  **optional_fetch_args)
        localizations = [l.to_dict() for l in localizations]
        if len(localizations) == 0:
            print(f"No localizations present in media {media_file}", flush=True)
            continue
        print(f"Processing {len(localizations)} detections", flush=True)

        # Group localizations by frame
        for lid, local in enumerate(localizations):
            frame = local['frame']
            if frame in localizations_by_frame:
                localizations_by_frame[frame].append(local)
            else:
                localizations_by_frame[frame] = [local]

        detections=[]
        track_ids=[]
        track_id=1

        # If media does not exist, download it.
        if strategy['method'] == 'iou-global-motion':
            if not os.path.exists(media_file):
                temp_path = f'/tmp/{os.path.basename(media_file)}'
                for progress in tator.util.download_media(api, media, temp_path):
                    print(f"Downloading {media_file}, {progress}%...")
                print("Download finished!")

                # Unfrag the file
                subprocess.run(["ffmpeg", '-i', temp_path, '-c:v', 'copy', media_file])
                os.remove(temp_path)

        if strategy['method'] == 'hybrid':
            # Not all visual methods need detection images
            vid=cv2.VideoCapture(media_file)
            ok=True
            frame = 0
            while ok:
                ok,frame_bgr = vid.read()
                if frame in localizations_by_frame:
                    for l in localizations_by_frame[frame]:
                        l['bgr'] = crop_localization(frame_bgr, l)
                        if l['attributes']['Confidence'] < 0.50:
                            continue
                        detections.append(l)
                        track_ids.append(track_id)
                        track_id += 1
                frame+=1
        else:
            # The method is analytical on the detections coordinates
            # and does not require processing the video
            for frame,frame_detections in localizations_by_frame.items():
                for det in frame_detections:
                    detections.append(det)
                    track_ids.append(track_id)
                    track_id += 1

        print("Loaded all detections", flush=True)
        track_ids = renumber_track_ids(track_ids)

        if strategy['method'] == 'hybrid':
            weights_strategy = HybridWeights(comparator,
                                             None,
                                             None,
                                             media_shape,
                                             fps,
                                             0.0,
                                             batch_size)
        elif strategy['method'] == 'iou':
            weights_strategy = IoUWeights(media_shape, **strategy['args'])
        elif strategy['method'] == 'iou-motion':
            weights_strategy = IoUMotionWeights(media_shape, **strategy['args'])
        elif strategy['method'] == 'iou-global-motion':
            weights_strategy = IoUGlobalMotionWeights(media_shape, media_file, **strategy['args'])

        # Generate localization bgr based on grouped localizations
        for x in strategy['frame-diffs']:
            print(f"Started {x}", flush=True)
            detections, track_ids, pairs, weights, constraints = join_tracklets(
                detections,
                track_ids,
                x,
                weights_strategy)

            if x in strategy['max-length']:
                trim_to = strategy['max-length'][x]
                print(f"Trimming track to max length of {trim_to}")
                detections, track_ids = trim_tracklets(detections, track_ids, trim_to)
            _,det_counts_per_track=np.unique(track_ids,return_counts=True)
            print(f"frame-diff {x}: {len(detections)} to {len(det_counts_per_track)}", flush=True)

            if x > 1 and strategy['extension']['method'] == 'linear-motion':
                ext_frames=x
                print(f"Extending by linear motion, {ext_frames}")
                tracklets = join_up_iteration(detections,track_ids)
                tracklets = extend_tracklets(tracklets, ext_frames)
                detections, track_ids = split_tracklets(tracklets)

        # Now we make new track objects based on the result
        # from the graph solver
        # [ detection, detection, detection, ...]
        # [ track#, track#, track#,...]
        # [ 0,0,1,1]
        # TODO: Handle is_cut?
        def join_up_final(detections, track_ids):
            tracklets = defaultdict(list)
            num_tracklets = np.max(track_ids) + 1
            assert(len(detections) == len(track_ids))
            for d,tid in zip(detections, track_ids):
                tracklets[tid].append(d)
            return tracklets

        def make_object(track):
            track.sort(key=lambda x:x['frame'])
            if classify_function:
                valid,attrs = classify_function(media.to_dict(), track, **classify_args)
            elif len(track) >= strategy['min-length']:
                valid = True
                attrs = {}
            else:
                valid = False
                attrs = {}
            if valid:
                obj={"type": args.tracklet_type_id,
                     "media_ids": [int(media_id)],
                     "localization_ids": [x['id'] for x in track],
                     **attrs,
                     "version": version_id}
                return obj
            else:
                return None

        tracklets = join_up_final(detections, track_ids)
        new_objs=[make_object(tracklet) for tracklet in tracklets.values()]
        new_objs=[x for x in new_objs if x is not None]
        print(f"New objects = {len(new_objs)}")
        with open(f"/work/{media_id}.json", "w") as f:
            json.dump(new_objs,f)
        if not args.dry_run:
            for response in tator.util.chunked_create(api.create_state_list,project, state_spec=new_objs):
                pass
        try:
            api.update_media(int(media_id), {"attributes":{"Tracklet Generator Processed": str(datetime.datetime.now())}})
        except:
            print("WARNING: Unable to update 'Tracklet Generator Processed' attribute")
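# ---------------------------------------------------------------------------
# Usage sketch (added illustration, not part of the original script): the
# strategy merge above is a plain shallow dict.update, so a YAML override
# replaces a top-level key wholesale rather than deep-merging it. The YAML
# contents below are hypothetical; the keys mirror default_strategy, but the
# values and the 'threshold' kwarg are assumptions, not taken from openem.
# ---------------------------------------------------------------------------
import yaml

example_strategy_yaml = """
method: iou
frame-diffs: [1, 2, 4, 8]
args:
  threshold: 0.25   # assumed IoUWeights kwarg; check openem.tracking for real names
extension:
  method: linear-motion
max-length:
  8: 200
min-length: 5
"""

example_defaults = {"method": "hybrid",
                    "frame-diffs": [1,2,4,8,16,32,64,128,256],
                    "args": {},
                    "extension": {'method' : None},
                    "max-length": {},
                    "min-length": 0}

# Same shallow merge the script performs: 'args' and 'frame-diffs' are
# overwritten, not merged, by the override file.
example_strategy = {**example_defaults}
example_strategy.update(yaml.safe_load(example_strategy_yaml))
assert example_strategy["method"] == "iou"
assert example_strategy["frame-diffs"] == [1, 2, 4, 8]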
# Source: hypergan MultiLoss (losses module), reassembled from the shredded
# fragments above.

import tensorflow as tf
import numpy as np
import hyperchamber as hc

from hypergan.losses.base_loss import BaseLoss
from hypergan.multi_component import MultiComponent

TINY=1e-8

class MultiLoss(BaseLoss):
    """Takes multiple distributions and does an additional approximator"""
    def _create(self, d_real, d_fake):
        gan = self.gan
        config = self.config

        losses = []
        split = self.split

        for d in gan.discriminator.children:
            if config.swapped:
                d_swap = d_real
                d_real = d_fake
                d_fake = d_swap

            # Slice 0 of the batch holds the real samples; the remaining
            # slices hold one generator's samples each, averaged together.
            ds = self.split_batch(d.sample, split)
            d_real = ds[0]
            d_fake = tf.add_n(ds[1:])/(len(ds)-1)

            loss_object = self.config['loss_class'](gan, self.config, d_real=d_real, d_fake=d_fake)
            losses.append(loss_object)

        #relational layer?
        combine = MultiComponent(combine='concat', components=losses)

        g_loss = combine.g_loss_features
        d_loss = combine.d_loss_features

        self.d_loss = d_loss
        self.g_loss = g_loss
        self.losses = losses

        return [d_loss, g_loss]
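# ---------------------------------------------------------------------------
# Added illustration (numpy, not hypergan code) of the batch layout _create
# relies on. split_batch is assumed to cut the discriminator's output batch
# into `split` equal slices, slice 0 scoring the real samples and each
# remaining slice scoring one generator; the fakes are then averaged,
# mirroring tf.add_n(ds[1:])/(len(ds)-1) above.
# ---------------------------------------------------------------------------
import numpy as np

def split_batch_sketch(sample, split):
    # Stand-in for BaseLoss.split_batch: cut the batch axis into equal slices.
    return np.split(sample, split, axis=0)

# Six discriminator scores: two for real data, two per generator (A, B).
d_sample = np.array([1.0, 0.9, 0.2, 0.1, 0.4, 0.3])

ds = split_batch_sketch(d_sample, 3)
d_real = ds[0]                        # scores of the real slice -> [1.0, 0.9]
d_fake = sum(ds[1:]) / (len(ds) - 1)  # mean of the fake slices  -> [0.3, 0.2]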
# Source: fidesops policy endpoints (FastAPI), reassembled from the shredded
# fragments above.

import logging
from typing import Any, Dict, List

from fastapi import APIRouter, Body, Depends, Security
from fastapi_pagination import (
    Page,
    Params,
)
from fastapi_pagination.bases import AbstractPage
from fastapi_pagination.ext.sqlalchemy import paginate
from fidesops.schemas.shared_schemas import FidesOpsKey
from pydantic import conlist
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from starlette.exceptions import HTTPException
from starlette.status import HTTP_404_NOT_FOUND

from fidesops.api import deps
from fidesops.api.v1 import scope_registry as scopes
from fidesops.api.v1 import urn_registry as urls
from fidesops.common_exceptions import (
    DataCategoryNotSupported,
    PolicyValidationError,
    RuleValidationError,
    RuleTargetValidationError,
    KeyOrNameAlreadyExists,
)
from fidesops.models.client import ClientDetail
from fidesops.models.policy import (
    ActionType,
    Policy,
    Rule,
    RuleTarget,
)
from fidesops.models.storage import StorageConfig
from fidesops.schemas import policy as schemas
from fidesops.schemas.api import BulkUpdateFailed
from fidesops.util.oauth_util import verify_oauth_client

router = APIRouter(tags=["Policy"], prefix=urls.V1_URL_PREFIX)

logger = logging.getLogger(__name__)


@router.get(
    urls.POLICY_LIST,
    status_code=200,
    response_model=Page[schemas.PolicyResponse],
    dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])],
)
def get_policy_list(
    *,
    db: Session = Depends(deps.get_db),
    params: Params = Depends(),
) -> AbstractPage[Policy]:
    """
    Return a paginated list of all Policy records in this system
    """
    logger.info(f"Finding all policies with pagination params '{params}'")
    policies = Policy.query(db=db)
    return paginate(policies, params=params)


def get_policy_or_error(db: Session, policy_key: FidesOpsKey) -> Policy:
    """Helper method to load Policy or throw a 404"""
    logger.info(f"Finding policy with key '{policy_key}'")
    policy = Policy.get_by(db=db, field="key", value=policy_key)
    if not policy:
        raise HTTPException(
            status_code=HTTP_404_NOT_FOUND,
            detail=f"No Policy found for key {policy_key}.",
        )
    return policy


@router.get(
    urls.POLICY_DETAIL,
    status_code=200,
    response_model=schemas.PolicyResponse,
    dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])],
)
def get_policy(
    *,
    policy_key: FidesOpsKey,
    db: Session = Depends(deps.get_db),
) -> schemas.PolicyResponse:
    """
    Return a single Policy
    """
    return get_policy_or_error(db, policy_key)


@router.patch(
    urls.POLICY_LIST,
    status_code=200,
    response_model=schemas.BulkPutPolicyResponse,
)
def create_or_update_policies(
    *,
    client: ClientDetail = Security(
        verify_oauth_client,
        scopes=[scopes.POLICY_CREATE_OR_UPDATE],
    ),
    db: Session = Depends(deps.get_db),
    data: conlist(schemas.Policy, max_items=50) = Body(...),  # type: ignore
) -> schemas.BulkPutPolicyResponse:
    """
    Given a list of policy data elements, create or update corresponding
    Policy objects or report failure
    """
    created_or_updated: List[Policy] = []
    failed: List[BulkUpdateFailed] = []
    logger.info(f"Starting bulk upsert for {len(data)} policies")

    for policy_schema in data:
        policy_data: Dict[str, Any] = dict(policy_schema)
        try:
            policy = Policy.create_or_update(
                db=db,
                data={
                    "name": policy_data["name"],
                    "key": policy_data.get("key"),
                    "client_id": client.id,
                },
            )
        except KeyOrNameAlreadyExists as exc:
            logger.warning("Create/update failed for policy: %s", exc)
            failure = {
                "message": exc.args[0],
                "data": policy_data,
            }
            failed.append(BulkUpdateFailed(**failure))
            continue
        except PolicyValidationError as exc:
            logger.warning("Create/update failed for policy: %s", exc)
            failure = {
                "message": "This record could not be added because the data provided was invalid.",
                "data": policy_data,
            }
            failed.append(BulkUpdateFailed(**failure))
            continue
        else:
            created_or_updated.append(policy)

    return schemas.BulkPutPolicyResponse(
        succeeded=created_or_updated,
        failed=failed,
    )


@router.patch(
    urls.RULE_LIST,
    status_code=200,
    response_model=schemas.BulkPutRuleResponse,
)
def create_or_update_rules(
    *,
    client: ClientDetail = Security(
        verify_oauth_client,
        scopes=[scopes.RULE_CREATE_OR_UPDATE],
    ),
    policy_key: FidesOpsKey,
    db: Session = Depends(deps.get_db),
    input_data: conlist(schemas.RuleCreate, max_items=50) = Body(...),  # type: ignore
) -> schemas.BulkPutRuleResponse:
    """
    Given a list of Rule data elements, create or update corresponding Rule
    objects or report failure
    """
    logger.info(f"Finding policy with key '{policy_key}'")
    policy = get_policy_or_error(db, policy_key)
    created_or_updated: List[Rule] = []
    failed: List[BulkUpdateFailed] = []
    logger.info(
        f"Starting bulk upsert for {len(input_data)} rules on policy {policy_key}"
    )

    for schema in input_data:
        # Validate all FKs in the input data exist
        associated_storage_config_id = None
        if schema.action_type == ActionType.access.value:
            # Only validate the associated StorageConfig on access rules
            storage_destination_key = schema.storage_destination_key
            associated_storage_config: StorageConfig = StorageConfig.get_by(
                db=db,
                field="key",
                value=storage_destination_key,
            )
            if not associated_storage_config:
                logger.warning(
                    f"No storage config found with key {storage_destination_key}"
                )
                failure = {
                    "message": f"A StorageConfig with key {storage_destination_key} does not exist",
                    "data": dict(
                        schema
                    ),  # Be sure to pass the schema out the same way it came in
                }
                failed.append(BulkUpdateFailed(**failure))
                continue
            else:
                associated_storage_config_id = associated_storage_config.id

        masking_strategy_data = None
        if schema.masking_strategy:
            masking_strategy_data = schema.masking_strategy.dict()

        try:
            rule = Rule.create_or_update(
                db=db,
                data={
                    "action_type": schema.action_type,
                    "client_id": client.id,
                    "key": schema.key,
                    "name": schema.name,
                    "policy_id": policy.id,
                    "storage_destination_id": associated_storage_config_id,
                    "masking_strategy": masking_strategy_data,
                },
            )
        except KeyOrNameAlreadyExists as exc:
            logger.warning(
                f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}"
            )
            failure = {
                "message": exc.args[0],
                "data": dict(schema),
            }
            failed.append(BulkUpdateFailed(**failure))
            continue
        except RuleValidationError as exc:
            logger.warning(
                f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}"
            )
            failure = {
                "message": exc.args[0],
                "data": dict(schema),
            }
            failed.append(BulkUpdateFailed(**failure))
            continue
        except ValueError as exc:
            logger.warning(
                f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}"
            )
            failure = {
                "message": exc.args[0],
                "data": dict(schema),
            }
            failed.append(BulkUpdateFailed(**failure))
            continue
        else:
            created_or_updated.append(rule)

    return schemas.BulkPutRuleResponse(succeeded=created_or_updated, failed=failed)


@router.delete(
    urls.RULE_DETAIL,
    status_code=204,
    dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])],
)
def delete_rule(
    *,
    policy_key: FidesOpsKey,
    rule_key: FidesOpsKey,
    db: Session = Depends(deps.get_db),
) -> None:
    """
    Delete a policy rule.
    """
    policy = get_policy_or_error(db, policy_key)

    logger.info(f"Finding rule with key '{rule_key}'")

    rule = Rule.filter(
        db=db, conditions=(Rule.key == rule_key and Rule.policy_id == policy.id)
    ).first()
    if not rule:
        raise HTTPException(
            status_code=HTTP_404_NOT_FOUND,
            detail=f"No Rule found for key {rule_key} on Policy {policy_key}.",
        )

    logger.info(f"Deleting rule with key '{rule_key}'")
    rule.delete(db=db)


@router.patch(
    urls.RULE_TARGET_LIST,
    status_code=200,
    response_model=schemas.BulkPutRuleTargetResponse,
)
def create_or_update_rule_targets(
    *,
    client: ClientDetail = Security(
        verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE]
    ),
    policy_key: FidesOpsKey,
    rule_key: FidesOpsKey,
    db: Session = Depends(deps.get_db),
    input_data: conlist(schemas.RuleTarget, max_items=50) = Body(...),  # type: ignore
) -> schemas.BulkPutRuleTargetResponse:
    """
    Given a list of Rule Target data elements, create or update corresponding
    RuleTarget objects or report failure
    """
    policy = get_policy_or_error(db, policy_key)

    logger.info(f"Finding rule with key '{rule_key}'")
    rule = Rule.filter(
        db=db, conditions=(Rule.key == rule_key and Rule.policy_id == policy.id)
    ).first()
    if not rule:
        raise HTTPException(
            status_code=HTTP_404_NOT_FOUND,
            detail=f"No Rule found for key {rule_key} on Policy {policy_key}.",
        )

    created_or_updated = []
    failed = []
    logger.info(
        f"Starting bulk upsert for {len(input_data)} rule targets on rule {rule_key}"
    )
    for schema in input_data:
        try:
            target = RuleTarget.create_or_update(
                db=db,
                data={
                    "name": schema.name,
                    "key": schema.key,
                    "data_category": schema.data_category,
                    "rule_id": rule.id,
                    "client_id": client.id,
                },
            )
        except KeyOrNameAlreadyExists as exc:
            logger.warning(
                f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}"
            )
            failure = {
                "message": exc.args[0],
                "data": dict(schema),
            }
            failed.append(BulkUpdateFailed(**failure))
            continue
        except (
            DataCategoryNotSupported,
            PolicyValidationError,
            RuleTargetValidationError,
        ) as exc:
            logger.warning(
                f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}"
            )
            failure = {
                "message": exc.args[0],
                "data": dict(schema),
            }
            failed.append(BulkUpdateFailed(**failure))
            continue
        except IntegrityError as exc:
            logger.warning(
                f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}"
            )
            failure = {
                "message": f"DataCategory {schema.data_category} is already specified on Rule with ID {rule.id}",
                "data": dict(schema),
            }
            failed.append(BulkUpdateFailed(**failure))
        else:
            created_or_updated.append(target)

    return schemas.BulkPutRuleTargetResponse(
        succeeded=created_or_updated,
        failed=failed,
    )


@router.delete(
    urls.RULE_TARGET_DETAIL,
    status_code=204,
    dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])],
)
def delete_rule_target(
    *,
    policy_key: FidesOpsKey,
    rule_key: FidesOpsKey,
    rule_target_key: FidesOpsKey,
    db: Session = Depends(deps.get_db),
) -> None:
    """
    Delete the rule target.
    """
    policy = get_policy_or_error(db, policy_key)

    logger.info(f"Finding rule with key '{rule_key}'")
    rule = Rule.filter(
        db=db, conditions=(Rule.key == rule_key and Rule.policy_id == policy.id)
    ).first()
    if not rule:
        raise HTTPException(
            status_code=HTTP_404_NOT_FOUND,
            detail=f"No Rule found for key {rule_key} on Policy {policy_key}.",
        )

    logger.info(f"Finding rule target with key '{rule_target_key}'")
    target = RuleTarget.filter(
        db=db,
        conditions=(
            RuleTarget.key == rule_target_key and RuleTarget.rule_id == rule.id
        ),
    ).first()
    if not target:
        raise HTTPException(
            status_code=HTTP_404_NOT_FOUND,
            detail=f"No RuleTarget found for key {rule_target_key} at Rule {rule_key} on Policy {policy_key}.",
        )

    logger.info(f"Deleting rule target with key '{rule_target_key}'")
    target.delete(db=db)
all Policy", "= RuleTarget.create_or_update( db=db, data={ \"name\": schema.name, \"key\": schema.key, \"data_category\": schema.data_category,", "Session = Depends(deps.get_db), ) -> None: \"\"\" Delete a policy", "else: created_or_updated.append(policy) return schemas.BulkPutPolicyResponse( succeeded=created_or_updated, failed=failed, ) @router.patch( urls.RULE_LIST, status_code=200,", "= { \"message\": exc.args[0], \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except", "except KeyOrNameAlreadyExists as exc: logger.warning(\"Create/update failed for policy: %s\", exc)", "== rule_key and Rule.policy_id == policy.id) ).first() if not rule:", "List[BulkUpdateFailed] = [] logger.info(f\"Starting bulk upsert for {len(data)} policies\") for", "[] failed: List[BulkUpdateFailed] = [] logger.info(f\"Starting bulk upsert for {len(data)}", "rule = Rule.create_or_update( db=db, data={ \"action_type\": schema.action_type, \"client_id\": client.id, \"key\":", "{ \"message\": f\"DataCategory {schema.data_category} is already specified on Rule with", "= APIRouter(tags=[\"Policy\"], prefix=urls.V1_URL_PREFIX) logger = logging.getLogger(__name__) @router.get( urls.POLICY_LIST, status_code=200, response_model=Page[schemas.PolicyResponse],", "policy_key: FidesOpsKey) -> Policy: \"\"\"Helper method to load Policy or", "policy_data: Dict[str, Any] = dict(policy_schema) try: policy = Policy.create_or_update( db=db,", "key '{rule_key}'\") rule.delete(db=db) @router.patch( urls.RULE_TARGET_LIST, status_code=200, response_model=schemas.BulkPutRuleTargetResponse, ) def create_or_update_rule_targets(", "= [] failed: List[BulkUpdateFailed] = [] logger.info( f\"Starting bulk upsert", "), policy_key: FidesOpsKey, rule_key: FidesOpsKey, db: Session = Depends(deps.get_db), input_data:", "from pydantic import conlist from sqlalchemy.exc import IntegrityError from sqlalchemy.orm", "failed.append(BulkUpdateFailed(**failure)) continue else: created_or_updated.append(rule) return schemas.BulkPutRuleResponse(succeeded=created_or_updated, failed=failed) @router.delete( urls.RULE_DETAIL, status_code=204,", "created_or_updated.append(target) return schemas.BulkPutRuleTargetResponse( succeeded=created_or_updated, failed=failed, ) @router.delete( urls.RULE_TARGET_DETAIL, status_code=204, dependencies=[Security(verify_oauth_client,", "{len(data)} policies\") for policy_schema in data: policy_data: Dict[str, Any] =", "Rule found for key {rule_key} on Policy {policy_key}.\", ) created_or_updated", "return policy @router.get( urls.POLICY_DETAIL, status_code=200, response_model=schemas.PolicyResponse, dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])], ) def", "storage config found with key {storage_destination_key}\" ) failure = {", "FidesOpsKey, db: Session = Depends(deps.get_db), input_data: conlist(schemas.RuleCreate, max_items=50) = Body(...),", "db: Session = Depends(deps.get_db), data: conlist(schemas.Policy, max_items=50) = Body(...), #", "continue else: created_or_updated.append(rule) return schemas.BulkPutRuleResponse(succeeded=created_or_updated, failed=failed) @router.delete( urls.RULE_DETAIL, status_code=204, dependencies=[Security(verify_oauth_client,", "from fidesops.schemas import policy as schemas from fidesops.schemas.api import BulkUpdateFailed", "f\"Starting bulk upsert for {len(input_data)} rule targets on rule {rule_key}\"", "or update corresponding Policy objects or report failure \"\"\" created_or_updated:", "\"message\": exc.args[0], \"data\": dict(schema), 
} failed.append(BulkUpdateFailed(**failure)) continue except IntegrityError as", "rules on policy {policy_key}\" ) for schema in input_data: #", "as exc: logger.warning(\"Create/update failed for policy: %s\", exc) failure =", "policy_key: FidesOpsKey, db: Session = Depends(deps.get_db), ) -> schemas.PolicyResponse: \"\"\"", "rule target with key '{rule_target_key}'\") target = RuleTarget.filter( db=db, conditions=(", "from sqlalchemy.orm import Session from starlette.exceptions import HTTPException from starlette.status", "import deps from fidesops.api.v1 import scope_registry as scopes from fidesops.api.v1", "for {len(input_data)} rule targets on rule {rule_key}\" ) for schema", "key {rule_key} on Policy {policy_key}.\", ) logger.info(f\"Deleting rule with key", "{ \"message\": exc.args[0], \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except IntegrityError", "{policy_key}\" ) for schema in input_data: # Validate all FKs", "value=policy_key) if not policy: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f\"No Policy found", "found with key {storage_destination_key}\" ) failure = { \"message\": f\"A", "not rule: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f\"No Rule found for key", "dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])], ) def delete_rule_target( *, policy_key: FidesOpsKey, rule_key: FidesOpsKey,", "rule_key: FidesOpsKey, db: Session = Depends(deps.get_db), ) -> None: \"\"\"", "policy_data.get(\"key\"), \"client_id\": client.id, }, ) except KeyOrNameAlreadyExists as exc: logger.warning(\"Create/update", "status_code=200, response_model=schemas.BulkPutRuleResponse, ) def create_or_update_rules( *, client: ClientDetail = Security(", "failed.append(BulkUpdateFailed(**failure)) continue except PolicyValidationError as exc: logger.warning(\"Create/update failed for policy:", "fidesops.api.v1 import scope_registry as scopes from fidesops.api.v1 import urn_registry as", "\"key\": policy_data.get(\"key\"), \"client_id\": client.id, }, ) except KeyOrNameAlreadyExists as exc:", ") def create_or_update_rule_targets( *, client: ClientDetail = Security( verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE]", "failed.append(BulkUpdateFailed(**failure)) else: created_or_updated.append(target) return schemas.BulkPutRuleTargetResponse( succeeded=created_or_updated, failed=failed, ) @router.delete( urls.RULE_TARGET_DETAIL,", "def get_policy( *, policy_key: FidesOpsKey, db: Session = Depends(deps.get_db), )", "if not associated_storage_config: logger.warning( f\"No storage config found with key", "data: conlist(schemas.Policy, max_items=50) = Body(...), # type: ignore ) ->", "= StorageConfig.get_by( db=db, field=\"key\", value=storage_destination_key, ) if not associated_storage_config: logger.warning(", "it came in } failed.append(BulkUpdateFailed(**failure)) continue else: associated_storage_config_id = associated_storage_config.id", "policy: %s\", exc) failure = { \"message\": exc.args[0], \"data\": policy_data,", "all policies with pagination params '{params}'\") policies = Policy.query(db=db) return", "on policy {policy_key}: {exc}\" ) failure = { \"message\": exc.args[0],", "import AbstractPage from fastapi_pagination.ext.sqlalchemy import paginate from fidesops.schemas.shared_schemas import FidesOpsKey", "for key {policy_key}.\", ) return policy @router.get( urls.POLICY_DETAIL, status_code=200, response_model=schemas.PolicyResponse,", "\"data\": dict( schema ), # Be sure to pass the", "with key 
'{rule_key}'\") rule = Rule.filter( db=db, conditions=(Rule.key == rule_key", "\"message\": exc.args[0], \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except RuleValidationError as", "verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE] ), policy_key: FidesOpsKey, rule_key: FidesOpsKey, db: Session =", "RuleTargetValidationError, KeyOrNameAlreadyExists, ) from fidesops.models.client import ClientDetail from fidesops.models.policy import", ") failure = { \"message\": f\"A StorageConfig with key {storage_destination_key}", "# type: ignore ) -> schemas.BulkPutRuleTargetResponse: \"\"\" Given a list", "failed.append(BulkUpdateFailed(**failure)) continue else: associated_storage_config_id = associated_storage_config.id masking_strategy_data = None if", "rule = Rule.filter( db=db, conditions=(Rule.key == rule_key and Rule.policy_id ==", "status_code=200, response_model=schemas.PolicyResponse, dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])], ) def get_policy( *, policy_key: FidesOpsKey,", "storage_destination_key = schema.storage_destination_key associated_storage_config: StorageConfig = StorageConfig.get_by( db=db, field=\"key\", value=storage_destination_key,", "from fidesops.api import deps from fidesops.api.v1 import scope_registry as scopes", "urls.RULE_TARGET_LIST, status_code=200, response_model=schemas.BulkPutRuleTargetResponse, ) def create_or_update_rule_targets( *, client: ClientDetail =", "params: Params = Depends(), ) -> AbstractPage[Policy]: \"\"\" Return a", "db: Session = Depends(deps.get_db), input_data: conlist(schemas.RuleCreate, max_items=50) = Body(...), #", "else: created_or_updated.append(rule) return schemas.BulkPutRuleResponse(succeeded=created_or_updated, failed=failed) @router.delete( urls.RULE_DETAIL, status_code=204, dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])],", "AbstractPage[Policy]: \"\"\" Return a paginated list of all Policy records", "import HTTP_404_NOT_FOUND from fidesops.api import deps from fidesops.api.v1 import scope_registry", "key '{rule_key}'\") rule = Rule.filter( db=db, conditions=(Rule.key == rule_key and", "except KeyOrNameAlreadyExists as exc: logger.warning( f\"Create/update failed for rule '{schema.key}'", "def get_policy_list( *, db: Session = Depends(deps.get_db), params: Params =", "key {rule_key} on Policy {policy_key}.\", ) created_or_updated = [] failed", "upsert for {len(input_data)} rule targets on rule {rule_key}\" ) for", "schema.key, \"name\": schema.name, \"policy_id\": policy.id, \"storage_destination_id\": associated_storage_config_id, \"masking_strategy\": masking_strategy_data, },", "sqlalchemy.orm import Session from starlette.exceptions import HTTPException from starlette.status import", "dict(policy_schema) try: policy = Policy.create_or_update( db=db, data={ \"name\": policy_data[\"name\"], \"key\":", "exc.args[0], \"data\": policy_data, } failed.append(BulkUpdateFailed(**failure)) continue except PolicyValidationError as exc:", "exc.args[0], \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except IntegrityError as exc:", "'{policy_key}'\") policy = get_policy_or_error(db, policy_key) created_or_updated: List[Rule] = [] failed:", "if schema.action_type == ActionType.access.value: # Only validate the associated StorageConfig", "schema.action_type, \"client_id\": client.id, \"key\": schema.key, \"name\": schema.name, \"policy_id\": policy.id, \"storage_destination_id\":", 
"response_model=schemas.BulkPutRuleResponse, ) def create_or_update_rules( *, client: ClientDetail = Security( verify_oauth_client,", "of all Policy records in this system \"\"\" logger.info(f\"Finding all", "list of Rule data elements, create or update corresponding Rule", "schema.name, \"key\": schema.key, \"data_category\": schema.data_category, \"rule_id\": rule.id, \"client_id\": client.id, },", "@router.patch( urls.RULE_LIST, status_code=200, response_model=schemas.BulkPutRuleResponse, ) def create_or_update_rules( *, client: ClientDetail", "} failed.append(BulkUpdateFailed(**failure)) continue else: associated_storage_config_id = associated_storage_config.id masking_strategy_data = None", "try: target = RuleTarget.create_or_update( db=db, data={ \"name\": schema.name, \"key\": schema.key,", "{policy_key}.\", ) return policy @router.get( urls.POLICY_DETAIL, status_code=200, response_model=schemas.PolicyResponse, dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])],", "logging.getLogger(__name__) @router.get( urls.POLICY_LIST, status_code=200, response_model=Page[schemas.PolicyResponse], dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])], ) def get_policy_list(", "from fastapi_pagination.ext.sqlalchemy import paginate from fidesops.schemas.shared_schemas import FidesOpsKey from pydantic", "associated_storage_config.id masking_strategy_data = None if schema.masking_strategy: masking_strategy_data = schema.masking_strategy.dict() try:", "\"\"\" Delete the rule target. \"\"\" policy = get_policy_or_error(db, policy_key)", "fidesops.schemas.api import BulkUpdateFailed from fidesops.util.oauth_util import verify_oauth_client router = APIRouter(tags=[\"Policy\"],", "corresponding Rule objects or report failure \"\"\" logger.info(f\"Finding policy with", "schemas.BulkPutRuleResponse(succeeded=created_or_updated, failed=failed) @router.delete( urls.RULE_DETAIL, status_code=204, dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])], ) def delete_rule(", "except ValueError as exc: logger.warning( f\"Create/update failed for rule '{schema.key}'", "exc: logger.warning(\"Create/update failed for policy: %s\", exc) failure = {", "target: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f\"No RuleTarget found for key {rule_target_key}", "conlist(schemas.Policy, max_items=50) = Body(...), # type: ignore ) -> schemas.BulkPutPolicyResponse:", "import policy as schemas from fidesops.schemas.api import BulkUpdateFailed from fidesops.util.oauth_util", "FKs in the input data exist associated_storage_config_id = None if", "for schema in input_data: # Validate all FKs in the", "a list of Rule data elements, create corresponding Rule objects", "for policy_schema in data: policy_data: Dict[str, Any] = dict(policy_schema) try:", "{policy_key}: {exc}\" ) failure = { \"message\": exc.args[0], \"data\": dict(schema),", "\"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except ( DataCategoryNotSupported, PolicyValidationError, RuleTargetValidationError,", "policy_key: FidesOpsKey, db: Session = Depends(deps.get_db), input_data: conlist(schemas.RuleCreate, max_items=50) =", "Body(...), # type: ignore ) -> schemas.BulkPutPolicyResponse: \"\"\" Given a", "logger.info(f\"Starting bulk upsert for {len(data)} policies\") for policy_schema in data:", "# Only validate the associated StorageConfig on access rules storage_destination_key", "sure to pass the schema out the same way it", "-> schemas.BulkPutRuleResponse: 
\"\"\" Given a list of Rule data elements,", ") failure = { \"message\": f\"DataCategory {schema.data_category} is already specified", "{rule.id}\", \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) else: created_or_updated.append(target) return schemas.BulkPutRuleTargetResponse( succeeded=created_or_updated,", "on rule {rule_key}: {exc}\" ) failure = { \"message\": f\"DataCategory", "single Policy \"\"\" return get_policy_or_error(db, policy_key) @router.patch( urls.POLICY_LIST, status_code=200, response_model=schemas.BulkPutPolicyResponse,", "import ( Page, Params, ) from fastapi_pagination.bases import AbstractPage from", "RuleTarget.key == rule_target_key and RuleTarget.rule_id == rule.id ), ).first() if", "key {storage_destination_key} does not exist\", \"data\": dict( schema ), #", "scopes=[scopes.POLICY_READ])], ) def get_policy_list( *, db: Session = Depends(deps.get_db), params:", "Delete a policy rule. \"\"\" policy = get_policy_or_error(db, policy_key) logger.info(f\"Finding", "policy data elements, create or update corresponding Policy objects or", "== rule.id ), ).first() if not target: raise HTTPException( status_code=HTTP_404_NOT_FOUND,", "rule_key: FidesOpsKey, rule_target_key: FidesOpsKey, db: Session = Depends(deps.get_db), ) ->", "status_code=200, response_model=schemas.BulkPutRuleTargetResponse, ) def create_or_update_rule_targets( *, client: ClientDetail = Security(", "RuleTarget.filter( db=db, conditions=( RuleTarget.key == rule_target_key and RuleTarget.rule_id == rule.id", "IntegrityError as exc: logger.warning( f\"Create/update failed for rule target {schema.key}", "List[Policy] = [] failed: List[BulkUpdateFailed] = [] logger.info(f\"Starting bulk upsert", "= Body(...), # type: ignore ) -> schemas.BulkPutPolicyResponse: \"\"\" Given", "= schema.storage_destination_key associated_storage_config: StorageConfig = StorageConfig.get_by( db=db, field=\"key\", value=storage_destination_key, )", "*, db: Session = Depends(deps.get_db), params: Params = Depends(), )", "\"client_id\": client.id, }, ) except KeyOrNameAlreadyExists as exc: logger.warning(\"Create/update failed", "\"\"\" logger.info(f\"Finding all policies with pagination params '{params}'\") policies =", "from fastapi import APIRouter, Body, Depends, Security from fastapi_pagination import", "Depends(deps.get_db), ) -> schemas.PolicyResponse: \"\"\" Return a single Policy \"\"\"", "record could not be added because the data provided was", "scopes=[scopes.RULE_CREATE_OR_UPDATE] ), policy_key: FidesOpsKey, rule_key: FidesOpsKey, db: Session = Depends(deps.get_db),", "APIRouter, Body, Depends, Security from fastapi_pagination import ( Page, Params,", "key {rule_key} on Policy {policy_key}.\", ) logger.info(f\"Finding rule target with", "FidesOpsKey, db: Session = Depends(deps.get_db), ) -> schemas.PolicyResponse: \"\"\" Return", "None: \"\"\" Delete the rule target. 
\"\"\" policy = get_policy_or_error(db,", "Body(...), # type: ignore ) -> schemas.BulkPutRuleResponse: \"\"\" Given a", "on access rules storage_destination_key = schema.storage_destination_key associated_storage_config: StorageConfig = StorageConfig.get_by(", "failed: List[BulkUpdateFailed] = [] logger.info( f\"Starting bulk upsert for {len(input_data)}", "way it came in } failed.append(BulkUpdateFailed(**failure)) continue else: associated_storage_config_id =", "\"key\": schema.key, \"name\": schema.name, \"policy_id\": policy.id, \"storage_destination_id\": associated_storage_config_id, \"masking_strategy\": masking_strategy_data,", "Policy {policy_key}.\", ) logger.info(f\"Deleting rule with key '{rule_key}'\") rule.delete(db=db) @router.patch(", "list of Rule data elements, create corresponding Rule objects or", "logger.info( f\"Starting bulk upsert for {len(input_data)} rules on policy {policy_key}\"", "logger.info(f\"Finding policy with key '{policy_key}'\") policy = get_policy_or_error(db, policy_key) created_or_updated:", "failure = { \"message\": f\"A StorageConfig with key {storage_destination_key} does", "= { \"message\": f\"A StorageConfig with key {storage_destination_key} does not", "associated_storage_config_id, \"masking_strategy\": masking_strategy_data, }, ) except KeyOrNameAlreadyExists as exc: logger.warning(", "( Page, Params, ) from fastapi_pagination.bases import AbstractPage from fastapi_pagination.ext.sqlalchemy", "404\"\"\" logger.info(f\"Finding policy with key '{policy_key}'\") policy = Policy.get_by(db=db, field=\"key\",", "{ \"message\": exc.args[0], \"data\": policy_data, } failed.append(BulkUpdateFailed(**failure)) continue except PolicyValidationError", "deps from fidesops.api.v1 import scope_registry as scopes from fidesops.api.v1 import", "import logging from typing import Any, Dict, List from fastapi", "added because the data provided was invalid.\", \"data\": policy_data, }", "from starlette.exceptions import HTTPException from starlette.status import HTTP_404_NOT_FOUND from fidesops.api", "all Policy records in this system \"\"\" logger.info(f\"Finding all policies", "\"\"\" logger.info(f\"Finding policy with key '{policy_key}'\") policy = get_policy_or_error(db, policy_key)", "logger.info(f\"Finding rule with key '{rule_key}'\") rule = Rule.filter( db=db, conditions=(Rule.key", "} failed.append(BulkUpdateFailed(**failure)) else: created_or_updated.append(target) return schemas.BulkPutRuleTargetResponse( succeeded=created_or_updated, failed=failed, ) @router.delete(", "== ActionType.access.value: # Only validate the associated StorageConfig on access", "} failed.append(BulkUpdateFailed(**failure)) continue except IntegrityError as exc: logger.warning( f\"Create/update failed", "import ClientDetail from fidesops.models.policy import ( ActionType, Policy, Rule, RuleTarget,", "typing import Any, Dict, List from fastapi import APIRouter, Body,", ") for schema in input_data: try: target = RuleTarget.create_or_update( db=db,", "Rule found for key {rule_key} on Policy {policy_key}.\", ) logger.info(f\"Finding", "continue except ValueError as exc: logger.warning( f\"Create/update failed for rule", "urls.RULE_DETAIL, status_code=204, dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])], ) def delete_rule( *, policy_key: FidesOpsKey,", "failure = { \"message\": exc.args[0], \"data\": policy_data, } failed.append(BulkUpdateFailed(**failure)) continue", "{schema.key} on rule {rule_key}: {exc}\" ) failure = { \"message\":", "policy with key 
'{policy_key}'\") policy = get_policy_or_error(db, policy_key) created_or_updated: List[Rule]", "on Rule with ID {rule.id}\", \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) else:", "{ \"message\": exc.args[0], \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except (", "\"name\": policy_data[\"name\"], \"key\": policy_data.get(\"key\"), \"client_id\": client.id, }, ) except KeyOrNameAlreadyExists", "dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue else: created_or_updated.append(rule) return schemas.BulkPutRuleResponse(succeeded=created_or_updated, failed=failed) @router.delete(", "continue except RuleValidationError as exc: logger.warning( f\"Create/update failed for rule", "detail=f\"No Rule found for key {rule_key} on Policy {policy_key}.\", )", "rule targets on rule {rule_key}\" ) for schema in input_data:", "exc) failure = { \"message\": exc.args[0], \"data\": policy_data, } failed.append(BulkUpdateFailed(**failure))", "Session = Depends(deps.get_db), data: conlist(schemas.Policy, max_items=50) = Body(...), # type:", "provided was invalid.\", \"data\": policy_data, } failed.append(BulkUpdateFailed(**failure)) continue else: created_or_updated.append(policy)", "Return a paginated list of all Policy records in this", "dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])], ) def delete_rule( *, policy_key: FidesOpsKey, rule_key: FidesOpsKey,", "\"client_id\": client.id, }, ) except KeyOrNameAlreadyExists as exc: logger.warning( f\"Create/update", "# type: ignore ) -> schemas.BulkPutPolicyResponse: \"\"\" Given a list", "continue else: associated_storage_config_id = associated_storage_config.id masking_strategy_data = None if schema.masking_strategy:", "-> Policy: \"\"\"Helper method to load Policy or throw a", "Session = Depends(deps.get_db), params: Params = Depends(), ) -> AbstractPage[Policy]:", "= Policy.query(db=db) return paginate(policies, params=params) def get_policy_or_error(db: Session, policy_key: FidesOpsKey)", "as exc: logger.warning( f\"Create/update failed for rule '{schema.key}' on policy", "in input_data: try: target = RuleTarget.create_or_update( db=db, data={ \"name\": schema.name,", "\"policy_id\": policy.id, \"storage_destination_id\": associated_storage_config_id, \"masking_strategy\": masking_strategy_data, }, ) except KeyOrNameAlreadyExists", "not exist\", \"data\": dict( schema ), # Be sure to", "fidesops.api import deps from fidesops.api.v1 import scope_registry as scopes from", "policy_data, } failed.append(BulkUpdateFailed(**failure)) continue else: created_or_updated.append(policy) return schemas.BulkPutPolicyResponse( succeeded=created_or_updated, failed=failed,", "all FKs in the input data exist associated_storage_config_id = None", "data={ \"action_type\": schema.action_type, \"client_id\": client.id, \"key\": schema.key, \"name\": schema.name, \"policy_id\":", "\"\"\" Delete a policy rule. 
\"\"\" policy = get_policy_or_error(db, policy_key)", "else: created_or_updated.append(target) return schemas.BulkPutRuleTargetResponse( succeeded=created_or_updated, failed=failed, ) @router.delete( urls.RULE_TARGET_DETAIL, status_code=204,", "detail=f\"No Policy found for key {policy_key}.\", ) return policy @router.get(", "*, client: ClientDetail = Security( verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE], ), policy_key: FidesOpsKey,", "import IntegrityError from sqlalchemy.orm import Session from starlette.exceptions import HTTPException", "status_code=HTTP_404_NOT_FOUND, detail=f\"No RuleTarget found for key {rule_target_key} at Rule {rule_key}", "fastapi_pagination import ( Page, Params, ) from fastapi_pagination.bases import AbstractPage", "in this system \"\"\" logger.info(f\"Finding all policies with pagination params", "= Depends(deps.get_db), input_data: conlist(schemas.RuleTarget, max_items=50) = Body(...), # type: ignore", "KeyOrNameAlreadyExists as exc: logger.warning( f\"Create/update failed for rule '{schema.key}' on", "rule {rule_key}\" ) for schema in input_data: try: target =", "from fidesops.util.oauth_util import verify_oauth_client router = APIRouter(tags=[\"Policy\"], prefix=urls.V1_URL_PREFIX) logger =", "schemas.PolicyResponse: \"\"\" Return a single Policy \"\"\" return get_policy_or_error(db, policy_key)", "input data exist associated_storage_config_id = None if schema.action_type == ActionType.access.value:", "urls.POLICY_LIST, status_code=200, response_model=Page[schemas.PolicyResponse], dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])], ) def get_policy_list( *, db:", "data: policy_data: Dict[str, Any] = dict(policy_schema) try: policy = Policy.create_or_update(", "from fidesops.models.policy import ( ActionType, Policy, Rule, RuleTarget, ) from", "rule.delete(db=db) @router.patch( urls.RULE_TARGET_LIST, status_code=200, response_model=schemas.BulkPutRuleTargetResponse, ) def create_or_update_rule_targets( *, client:", "= [] failed: List[BulkUpdateFailed] = [] logger.info(f\"Starting bulk upsert for", "policy_key) @router.patch( urls.POLICY_LIST, status_code=200, response_model=schemas.BulkPutPolicyResponse, ) def create_or_update_policies( *, client:", "[] logger.info(f\"Starting bulk upsert for {len(data)} policies\") for policy_schema in", "= get_policy_or_error(db, policy_key) created_or_updated: List[Rule] = [] failed: List[BulkUpdateFailed] =", "conlist(schemas.RuleTarget, max_items=50) = Body(...), # type: ignore ) -> schemas.BulkPutRuleTargetResponse:", "RuleTarget.rule_id == rule.id ), ).first() if not target: raise HTTPException(", "== policy.id) ).first() if not rule: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f\"No", "at Rule {rule_key} on Policy {policy_key}.\", ) logger.info(f\"Deleting rule target", "\"\"\" created_or_updated: List[Policy] = [] failed: List[BulkUpdateFailed] = [] logger.info(f\"Starting", "policy with key '{policy_key}'\") policy = Policy.get_by(db=db, field=\"key\", value=policy_key) if", "max_items=50) = Body(...), # type: ignore ) -> schemas.BulkPutRuleTargetResponse: \"\"\"", "Policy {policy_key}.\", ) created_or_updated = [] failed = [] logger.info(", "associated StorageConfig on access rules storage_destination_key = schema.storage_destination_key associated_storage_config: StorageConfig", "client: ClientDetail = Security( verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE] ), policy_key: FidesOpsKey, rule_key:", "AbstractPage from fastapi_pagination.ext.sqlalchemy 
import paginate from fidesops.schemas.shared_schemas import FidesOpsKey from", "from typing import Any, Dict, List from fastapi import APIRouter,", "succeeded=created_or_updated, failed=failed, ) @router.delete( urls.RULE_TARGET_DETAIL, status_code=204, dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])], ) def", "the input data exist associated_storage_config_id = None if schema.action_type ==", "with key '{policy_key}'\") policy = Policy.get_by(db=db, field=\"key\", value=policy_key) if not", "except IntegrityError as exc: logger.warning( f\"Create/update failed for rule target", "as schemas from fidesops.schemas.api import BulkUpdateFailed from fidesops.util.oauth_util import verify_oauth_client", "\"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue else: created_or_updated.append(rule) return schemas.BulkPutRuleResponse(succeeded=created_or_updated, failed=failed)", "DataCategoryNotSupported, PolicyValidationError, RuleValidationError, RuleTargetValidationError, KeyOrNameAlreadyExists, ) from fidesops.models.client import ClientDetail", "= Depends(deps.get_db), ) -> None: \"\"\" Delete the rule target.", "= Depends(deps.get_db), params: Params = Depends(), ) -> AbstractPage[Policy]: \"\"\"", "'{params}'\") policies = Policy.query(db=db) return paginate(policies, params=params) def get_policy_or_error(db: Session,", "== rule_target_key and RuleTarget.rule_id == rule.id ), ).first() if not", "pagination params '{params}'\") policies = Policy.query(db=db) return paginate(policies, params=params) def", "Rule found for key {rule_key} on Policy {policy_key}.\", ) logger.info(f\"Deleting", "{rule_key}: {exc}\" ) failure = { \"message\": f\"DataCategory {schema.data_category} is", "value=storage_destination_key, ) if not associated_storage_config: logger.warning( f\"No storage config found", "paginate(policies, params=params) def get_policy_or_error(db: Session, policy_key: FidesOpsKey) -> Policy: \"\"\"Helper", "-> schemas.PolicyResponse: \"\"\" Return a single Policy \"\"\" return get_policy_or_error(db,", "Rule data elements, create corresponding Rule objects or report failure", "'{schema.key}' on policy {policy_key}: {exc}\" ) failure = { \"message\":", "target = RuleTarget.filter( db=db, conditions=( RuleTarget.key == rule_target_key and RuleTarget.rule_id", "target {schema.key} on rule {rule_key}: {exc}\" ) failure = {", "rule_key: FidesOpsKey, db: Session = Depends(deps.get_db), input_data: conlist(schemas.RuleTarget, max_items=50) =", "Given a list of Rule data elements, create corresponding Rule", "not be added because the data provided was invalid.\", \"data\":", ") -> schemas.PolicyResponse: \"\"\" Return a single Policy \"\"\" return", "bulk upsert for {len(input_data)} rules on policy {policy_key}\" ) for", "HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f\"No Policy found for key {policy_key}.\", ) return", ").first() if not target: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f\"No RuleTarget found", "import Session from starlette.exceptions import HTTPException from starlette.status import HTTP_404_NOT_FOUND", "db=db, conditions=( RuleTarget.key == rule_target_key and RuleTarget.rule_id == rule.id ),", "Session, policy_key: FidesOpsKey) -> Policy: \"\"\"Helper method to load Policy", "{exc}\" ) failure = { \"message\": f\"DataCategory {schema.data_category} is already", "of Rule data elements, create or update corresponding Rule objects", ") from fidesops.models.client import ClientDetail from 
fidesops.models.policy import ( ActionType,", "detail=f\"No RuleTarget found for key {rule_target_key} at Rule {rule_key} on", "validate the associated StorageConfig on access rules storage_destination_key = schema.storage_destination_key", "f\"Starting bulk upsert for {len(input_data)} rules on policy {policy_key}\" )", "\"\"\" return get_policy_or_error(db, policy_key) @router.patch( urls.POLICY_LIST, status_code=200, response_model=schemas.BulkPutPolicyResponse, ) def", "db=db, field=\"key\", value=storage_destination_key, ) if not associated_storage_config: logger.warning( f\"No storage", "policy {policy_key}\" ) for schema in input_data: # Validate all", "( ActionType, Policy, Rule, RuleTarget, ) from fidesops.models.storage import StorageConfig", "the schema out the same way it came in }", "policy = get_policy_or_error(db, policy_key) created_or_updated: List[Rule] = [] failed: List[BulkUpdateFailed]", "conditions=( RuleTarget.key == rule_target_key and RuleTarget.rule_id == rule.id ), ).first()", "key {storage_destination_key}\" ) failure = { \"message\": f\"A StorageConfig with", "import BulkUpdateFailed from fidesops.util.oauth_util import verify_oauth_client router = APIRouter(tags=[\"Policy\"], prefix=urls.V1_URL_PREFIX)", "{policy_key}.\", ) logger.info(f\"Finding rule target with key '{rule_target_key}'\") target =", "if not target: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f\"No RuleTarget found for", "f\"No storage config found with key {storage_destination_key}\" ) failure =", "from fidesops.common_exceptions import ( DataCategoryNotSupported, PolicyValidationError, RuleValidationError, RuleTargetValidationError, KeyOrNameAlreadyExists, )", "Policy: \"\"\"Helper method to load Policy or throw a 404\"\"\"", "Policy.query(db=db) return paginate(policies, params=params) def get_policy_or_error(db: Session, policy_key: FidesOpsKey) ->", "conlist(schemas.RuleCreate, max_items=50) = Body(...), # type: ignore ) -> schemas.BulkPutRuleResponse:", "{len(input_data)} rule targets on rule {rule_key}\" ) for schema in", "objects or report failure \"\"\" policy = get_policy_or_error(db, policy_key) logger.info(f\"Finding", "continue except IntegrityError as exc: logger.warning( f\"Create/update failed for rule", "-> None: \"\"\" Delete the rule target. 
\"\"\" policy =", "StorageConfig on access rules storage_destination_key = schema.storage_destination_key associated_storage_config: StorageConfig =", "created_or_updated: List[Policy] = [] failed: List[BulkUpdateFailed] = [] logger.info(f\"Starting bulk", "= Body(...), # type: ignore ) -> schemas.BulkPutRuleTargetResponse: \"\"\" Given", "for policy: %s\", exc) failure = { \"message\": exc.args[0], \"data\":", "policy @router.get( urls.POLICY_DETAIL, status_code=200, response_model=schemas.PolicyResponse, dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])], ) def get_policy(", "Depends, Security from fastapi_pagination import ( Page, Params, ) from", "urls.POLICY_DETAIL, status_code=200, response_model=schemas.PolicyResponse, dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])], ) def get_policy( *, policy_key:", "or report failure \"\"\" logger.info(f\"Finding policy with key '{policy_key}'\") policy", "\"masking_strategy\": masking_strategy_data, }, ) except KeyOrNameAlreadyExists as exc: logger.warning( f\"Create/update", "f\"A StorageConfig with key {storage_destination_key} does not exist\", \"data\": dict(", "= Policy.get_by(db=db, field=\"key\", value=policy_key) if not policy: raise HTTPException( status_code=HTTP_404_NOT_FOUND,", "{ \"message\": \"This record could not be added because the", "params '{params}'\") policies = Policy.query(db=db) return paginate(policies, params=params) def get_policy_or_error(db:", "Security from fastapi_pagination import ( Page, Params, ) from fastapi_pagination.bases", "{storage_destination_key}\" ) failure = { \"message\": f\"A StorageConfig with key", "on Policy {policy_key}.\", ) logger.info(f\"Finding rule target with key '{rule_target_key}'\")", "rule '{schema.key}' on policy {policy_key}: {exc}\" ) failure = {", ") def create_or_update_policies( *, client: ClientDetail = Security( verify_oauth_client, scopes=[scopes.POLICY_CREATE_OR_UPDATE],", "failed.append(BulkUpdateFailed(**failure)) continue else: created_or_updated.append(policy) return schemas.BulkPutPolicyResponse( succeeded=created_or_updated, failed=failed, ) @router.patch(", "masking_strategy_data, }, ) except KeyOrNameAlreadyExists as exc: logger.warning( f\"Create/update failed", "input_data: try: target = RuleTarget.create_or_update( db=db, data={ \"name\": schema.name, \"key\":", "def create_or_update_rule_targets( *, client: ClientDetail = Security( verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE] ),", "input_data: # Validate all FKs in the input data exist", "delete_rule_target( *, policy_key: FidesOpsKey, rule_key: FidesOpsKey, rule_target_key: FidesOpsKey, db: Session", "associated_storage_config_id = associated_storage_config.id masking_strategy_data = None if schema.masking_strategy: masking_strategy_data =", "List[BulkUpdateFailed] = [] logger.info( f\"Starting bulk upsert for {len(input_data)} rules", "policy_key: FidesOpsKey, rule_key: FidesOpsKey, rule_target_key: FidesOpsKey, db: Session = Depends(deps.get_db),", "Policy {policy_key}.\", ) logger.info(f\"Finding rule target with key '{rule_target_key}'\") target", "type: ignore ) -> schemas.BulkPutRuleTargetResponse: \"\"\" Given a list of", "failed: List[BulkUpdateFailed] = [] logger.info(f\"Starting bulk upsert for {len(data)} policies\")", "{rule_key} on Policy {policy_key}.\", ) logger.info(f\"Finding rule target with key", ").first() if not rule: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f\"No Rule found", "rule_target_key and 
RuleTarget.rule_id == rule.id ), ).first() if not target:", "created_or_updated: List[Rule] = [] failed: List[BulkUpdateFailed] = [] logger.info( f\"Starting", "not associated_storage_config: logger.warning( f\"No storage config found with key {storage_destination_key}\"", "failed=failed, ) @router.patch( urls.RULE_LIST, status_code=200, response_model=schemas.BulkPutRuleResponse, ) def create_or_update_rules( *,", "Delete the rule target. \"\"\" policy = get_policy_or_error(db, policy_key) logger.info(f\"Finding", "} failed.append(BulkUpdateFailed(**failure)) continue except RuleValidationError as exc: logger.warning( f\"Create/update failed", "@router.delete( urls.RULE_DETAIL, status_code=204, dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])], ) def delete_rule( *, policy_key:", "a single Policy \"\"\" return get_policy_or_error(db, policy_key) @router.patch( urls.POLICY_LIST, status_code=200,", "PolicyValidationError, RuleValidationError, RuleTargetValidationError, KeyOrNameAlreadyExists, ) from fidesops.models.client import ClientDetail from", "return paginate(policies, params=params) def get_policy_or_error(db: Session, policy_key: FidesOpsKey) -> Policy:", "policy_key: FidesOpsKey, rule_key: FidesOpsKey, db: Session = Depends(deps.get_db), ) ->", "a policy rule. \"\"\" policy = get_policy_or_error(db, policy_key) logger.info(f\"Finding rule", "policy_key) logger.info(f\"Finding rule with key '{rule_key}'\") rule = Rule.filter( db=db,", "associated_storage_config_id = None if schema.action_type == ActionType.access.value: # Only validate", "}, ) except KeyOrNameAlreadyExists as exc: logger.warning( f\"Create/update failed for", "), db: Session = Depends(deps.get_db), data: conlist(schemas.Policy, max_items=50) = Body(...),", "Rule objects or report failure \"\"\" logger.info(f\"Finding policy with key", "created_or_updated.append(policy) return schemas.BulkPutPolicyResponse( succeeded=created_or_updated, failed=failed, ) @router.patch( urls.RULE_LIST, status_code=200, response_model=schemas.BulkPutRuleResponse,", "db: Session = Depends(deps.get_db), input_data: conlist(schemas.RuleTarget, max_items=50) = Body(...), #", "failure = { \"message\": exc.args[0], \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue", "= associated_storage_config.id masking_strategy_data = None if schema.masking_strategy: masking_strategy_data = schema.masking_strategy.dict()", "-> None: \"\"\" Delete a policy rule. \"\"\" policy =", "DataCategoryNotSupported, PolicyValidationError, RuleTargetValidationError, ) as exc: logger.warning( f\"Create/update failed for", "'{rule_key}'\") rule = Rule.filter( db=db, conditions=(Rule.key == rule_key and Rule.policy_id", "Given a list of policy data elements, create or update", "associated_storage_config: logger.warning( f\"No storage config found with key {storage_destination_key}\" )", "HTTPException from starlette.status import HTTP_404_NOT_FOUND from fidesops.api import deps from", "schema in input_data: try: target = RuleTarget.create_or_update( db=db, data={ \"name\":", "with ID {rule.id}\", \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) else: created_or_updated.append(target) return", "), policy_key: FidesOpsKey, db: Session = Depends(deps.get_db), input_data: conlist(schemas.RuleCreate, max_items=50)", "Security( verify_oauth_client, scopes=[scopes.POLICY_CREATE_OR_UPDATE], ), db: Session = Depends(deps.get_db), data: conlist(schemas.Policy,", "rule target. 
\"\"\" policy = get_policy_or_error(db, policy_key) logger.info(f\"Finding rule with", "policy_data, } failed.append(BulkUpdateFailed(**failure)) continue except PolicyValidationError as exc: logger.warning(\"Create/update failed", "Depends(deps.get_db), params: Params = Depends(), ) -> AbstractPage[Policy]: \"\"\" Return", "invalid.\", \"data\": policy_data, } failed.append(BulkUpdateFailed(**failure)) continue else: created_or_updated.append(policy) return schemas.BulkPutPolicyResponse(", "response_model=schemas.BulkPutRuleTargetResponse, ) def create_or_update_rule_targets( *, client: ClientDetail = Security( verify_oauth_client,", "this system \"\"\" logger.info(f\"Finding all policies with pagination params '{params}'\")", "Policy, Rule, RuleTarget, ) from fidesops.models.storage import StorageConfig from fidesops.schemas", "exc: logger.warning( f\"Create/update failed for rule '{schema.key}' on policy {policy_key}:", "succeeded=created_or_updated, failed=failed, ) @router.patch( urls.RULE_LIST, status_code=200, response_model=schemas.BulkPutRuleResponse, ) def create_or_update_rules(", "Rule with ID {rule.id}\", \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) else: created_or_updated.append(target)", "client: ClientDetail = Security( verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE], ), policy_key: FidesOpsKey, db:", "\"message\": exc.args[0], \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except ValueError as", "rule {rule_key}: {exc}\" ) failure = { \"message\": f\"DataCategory {schema.data_category}", "@router.get( urls.POLICY_LIST, status_code=200, response_model=Page[schemas.PolicyResponse], dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])], ) def get_policy_list( *,", "dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except ( DataCategoryNotSupported, PolicyValidationError, RuleTargetValidationError, )", "create or update corresponding Rule objects or report failure \"\"\"", "\"\"\" Given a list of policy data elements, create or", "None if schema.action_type == ActionType.access.value: # Only validate the associated", "\"key\": schema.key, \"data_category\": schema.data_category, \"rule_id\": rule.id, \"client_id\": client.id, }, )", "*, client: ClientDetail = Security( verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE] ), policy_key: FidesOpsKey,", "create or update corresponding Policy objects or report failure \"\"\"", ") @router.delete( urls.RULE_TARGET_DETAIL, status_code=204, dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])], ) def delete_rule_target( *,", "= [] logger.info( f\"Starting bulk upsert for {len(input_data)} rules on", ") except KeyOrNameAlreadyExists as exc: logger.warning( f\"Create/update failed for rule", "APIRouter(tags=[\"Policy\"], prefix=urls.V1_URL_PREFIX) logger = logging.getLogger(__name__) @router.get( urls.POLICY_LIST, status_code=200, response_model=Page[schemas.PolicyResponse], dependencies=[Security(verify_oauth_client,", "for key {rule_key} on Policy {policy_key}.\", ) created_or_updated = []", "continue except PolicyValidationError as exc: logger.warning(\"Create/update failed for policy: %s\",", "\"\"\" Given a list of Rule data elements, create or", "\"client_id\": client.id, \"key\": schema.key, \"name\": schema.name, \"policy_id\": policy.id, \"storage_destination_id\": associated_storage_config_id,", "\"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except IntegrityError as 
exc: logger.warning(", "specified on Rule with ID {rule.id}\", \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure))", "FidesOpsKey, rule_key: FidesOpsKey, db: Session = Depends(deps.get_db), ) -> None:", "schema.storage_destination_key associated_storage_config: StorageConfig = StorageConfig.get_by( db=db, field=\"key\", value=storage_destination_key, ) if", "came in } failed.append(BulkUpdateFailed(**failure)) continue else: associated_storage_config_id = associated_storage_config.id masking_strategy_data", "ValueError as exc: logger.warning( f\"Create/update failed for rule '{schema.key}' on", "verify_oauth_client router = APIRouter(tags=[\"Policy\"], prefix=urls.V1_URL_PREFIX) logger = logging.getLogger(__name__) @router.get( urls.POLICY_LIST,", "fastapi_pagination.ext.sqlalchemy import paginate from fidesops.schemas.shared_schemas import FidesOpsKey from pydantic import", "FidesOpsKey from pydantic import conlist from sqlalchemy.exc import IntegrityError from", "failure = { \"message\": \"This record could not be added", "exc.args[0], \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue else: created_or_updated.append(rule) return schemas.BulkPutRuleResponse(succeeded=created_or_updated,", "List from fastapi import APIRouter, Body, Depends, Security from fastapi_pagination", "create_or_update_rule_targets( *, client: ClientDetail = Security( verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE] ), policy_key:", "input_data: conlist(schemas.RuleCreate, max_items=50) = Body(...), # type: ignore ) ->", "same way it came in } failed.append(BulkUpdateFailed(**failure)) continue else: associated_storage_config_id", "-> schemas.BulkPutRuleTargetResponse: \"\"\" Given a list of Rule data elements,", "to load Policy or throw a 404\"\"\" logger.info(f\"Finding policy with", "report failure \"\"\" created_or_updated: List[Policy] = [] failed: List[BulkUpdateFailed] =", "*, policy_key: FidesOpsKey, rule_key: FidesOpsKey, db: Session = Depends(deps.get_db), )", "[] logger.info( f\"Starting bulk upsert for {len(input_data)} rules on policy", "= Security( verify_oauth_client, scopes=[scopes.POLICY_CREATE_OR_UPDATE], ), db: Session = Depends(deps.get_db), data:", "a list of Rule data elements, create or update corresponding", "not policy: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f\"No Policy found for key", "= Depends(deps.get_db), input_data: conlist(schemas.RuleCreate, max_items=50) = Body(...), # type: ignore", "= { \"message\": f\"DataCategory {schema.data_category} is already specified on Rule", "\"\"\" Return a paginated list of all Policy records in", "masking_strategy_data = None if schema.masking_strategy: masking_strategy_data = schema.masking_strategy.dict() try: rule", "ignore ) -> schemas.BulkPutPolicyResponse: \"\"\" Given a list of policy", "on Policy {policy_key}.\", ) logger.info(f\"Deleting rule with key '{rule_key}'\") rule.delete(db=db)", "prefix=urls.V1_URL_PREFIX) logger = logging.getLogger(__name__) @router.get( urls.POLICY_LIST, status_code=200, response_model=Page[schemas.PolicyResponse], dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])],", "schema ), # Be sure to pass the schema out", "= { \"message\": exc.args[0], \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue else:", "fidesops.common_exceptions import ( DataCategoryNotSupported, PolicyValidationError, RuleValidationError, RuleTargetValidationError, KeyOrNameAlreadyExists, ) from", "get_policy( *, 
policy_key: FidesOpsKey, db: Session = Depends(deps.get_db), ) ->", "StorageConfig.get_by( db=db, field=\"key\", value=storage_destination_key, ) if not associated_storage_config: logger.warning( f\"No", "{policy_key}.\", ) created_or_updated = [] failed = [] logger.info( f\"Starting", "policy_data[\"name\"], \"key\": policy_data.get(\"key\"), \"client_id\": client.id, }, ) except KeyOrNameAlreadyExists as", "schema out the same way it came in } failed.append(BulkUpdateFailed(**failure))", ") -> schemas.BulkPutRuleResponse: \"\"\" Given a list of Rule data", "exc) failure = { \"message\": \"This record could not be", "Policy {policy_key}.\", ) logger.info(f\"Deleting rule target with key '{rule_target_key}'\") target.delete(db=db)", "db=db, conditions=(Rule.key == rule_key and Rule.policy_id == policy.id) ).first() if", "created_or_updated.append(rule) return schemas.BulkPutRuleResponse(succeeded=created_or_updated, failed=failed) @router.delete( urls.RULE_DETAIL, status_code=204, dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])], )", "} failed.append(BulkUpdateFailed(**failure)) continue except PolicyValidationError as exc: logger.warning(\"Create/update failed for", "Depends(deps.get_db), input_data: conlist(schemas.RuleCreate, max_items=50) = Body(...), # type: ignore )", ") -> None: \"\"\" Delete a policy rule. \"\"\" policy", "update corresponding Rule objects or report failure \"\"\" logger.info(f\"Finding policy", "for rule '{schema.key}' on policy {policy_key}: {exc}\" ) failure =", "try: policy = Policy.create_or_update( db=db, data={ \"name\": policy_data[\"name\"], \"key\": policy_data.get(\"key\"),", "policy = Policy.create_or_update( db=db, data={ \"name\": policy_data[\"name\"], \"key\": policy_data.get(\"key\"), \"client_id\":", "ActionType, Policy, Rule, RuleTarget, ) from fidesops.models.storage import StorageConfig from", "HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f\"No Rule found for key {rule_key} on Policy", "targets on rule {rule_key}\" ) for schema in input_data: try:", "or throw a 404\"\"\" logger.info(f\"Finding policy with key '{policy_key}'\") policy", "verify_oauth_client, scopes=[scopes.POLICY_CREATE_OR_UPDATE], ), db: Session = Depends(deps.get_db), data: conlist(schemas.Policy, max_items=50)", "logger.warning(\"Create/update failed for policy: %s\", exc) failure = { \"message\":", "schema.action_type == ActionType.access.value: # Only validate the associated StorageConfig on", "bulk upsert for {len(data)} policies\") for policy_schema in data: policy_data:", "None: \"\"\" Delete a policy rule. 
\"\"\" policy = get_policy_or_error(db,", "\"message\": f\"DataCategory {schema.data_category} is already specified on Rule with ID", "upsert for {len(input_data)} rules on policy {policy_key}\" ) for schema", "if schema.masking_strategy: masking_strategy_data = schema.masking_strategy.dict() try: rule = Rule.create_or_update( db=db,", "client.id, \"key\": schema.key, \"name\": schema.name, \"policy_id\": policy.id, \"storage_destination_id\": associated_storage_config_id, \"masking_strategy\":", "Policy found for key {policy_key}.\", ) return policy @router.get( urls.POLICY_DETAIL,", "dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except IntegrityError as exc: logger.warning( f\"Create/update", "policies\") for policy_schema in data: policy_data: Dict[str, Any] = dict(policy_schema)", "pass the schema out the same way it came in", "from fidesops.schemas.shared_schemas import FidesOpsKey from pydantic import conlist from sqlalchemy.exc", "ClientDetail = Security( verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE] ), policy_key: FidesOpsKey, rule_key: FidesOpsKey,", "policy = Policy.get_by(db=db, field=\"key\", value=policy_key) if not policy: raise HTTPException(", "KeyOrNameAlreadyExists as exc: logger.warning(\"Create/update failed for policy: %s\", exc) failure", "= [] logger.info( f\"Starting bulk upsert for {len(input_data)} rule targets", "rule {rule_key}: {exc}\" ) failure = { \"message\": exc.args[0], \"data\":", "to pass the schema out the same way it came", "method to load Policy or throw a 404\"\"\" logger.info(f\"Finding policy", "= Body(...), # type: ignore ) -> schemas.BulkPutRuleResponse: \"\"\" Given", "Policy records in this system \"\"\" logger.info(f\"Finding all policies with", "target with key '{rule_target_key}'\") target = RuleTarget.filter( db=db, conditions=( RuleTarget.key", "*, policy_key: FidesOpsKey, db: Session = Depends(deps.get_db), ) -> schemas.PolicyResponse:", "{schema.data_category} is already specified on Rule with ID {rule.id}\", \"data\":", "failed for rule target {schema.key} on rule {rule_key}: {exc}\" )", "field=\"key\", value=policy_key) if not policy: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f\"No Policy", "def get_policy_or_error(db: Session, policy_key: FidesOpsKey) -> Policy: \"\"\"Helper method to", "on Policy {policy_key}.\", ) created_or_updated = [] failed = []", "StorageConfig = StorageConfig.get_by( db=db, field=\"key\", value=storage_destination_key, ) if not associated_storage_config:", "{rule_key} on Policy {policy_key}.\", ) logger.info(f\"Deleting rule target with key", "on Policy {policy_key}.\", ) logger.info(f\"Deleting rule target with key '{rule_target_key}'\")", "the associated StorageConfig on access rules storage_destination_key = schema.storage_destination_key associated_storage_config:", "found for key {rule_key} on Policy {policy_key}.\", ) logger.info(f\"Finding rule", "data elements, create or update corresponding Policy objects or report", "paginate from fidesops.schemas.shared_schemas import FidesOpsKey from pydantic import conlist from", "dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except RuleValidationError as exc: logger.warning( f\"Create/update", "return schemas.BulkPutRuleResponse(succeeded=created_or_updated, failed=failed) @router.delete( urls.RULE_DETAIL, status_code=204, dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])], ) def", "{rule_target_key} at Rule {rule_key} on Policy {policy_key}.\", ) 
logger.info(f\"Deleting rule", "Dict[str, Any] = dict(policy_schema) try: policy = Policy.create_or_update( db=db, data={", "logger.warning( f\"No storage config found with key {storage_destination_key}\" ) failure", ") def get_policy( *, policy_key: FidesOpsKey, db: Session = Depends(deps.get_db),", "key '{rule_target_key}'\") target = RuleTarget.filter( db=db, conditions=( RuleTarget.key == rule_target_key", "router = APIRouter(tags=[\"Policy\"], prefix=urls.V1_URL_PREFIX) logger = logging.getLogger(__name__) @router.get( urls.POLICY_LIST, status_code=200,", "get_policy_list( *, db: Session = Depends(deps.get_db), params: Params = Depends(),", "field=\"key\", value=storage_destination_key, ) if not associated_storage_config: logger.warning( f\"No storage config", "params=params) def get_policy_or_error(db: Session, policy_key: FidesOpsKey) -> Policy: \"\"\"Helper method", "report failure \"\"\" logger.info(f\"Finding policy with key '{policy_key}'\") policy =", "FidesOpsKey) -> Policy: \"\"\"Helper method to load Policy or throw", "import Any, Dict, List from fastapi import APIRouter, Body, Depends,", "} failed.append(BulkUpdateFailed(**failure)) continue except ValueError as exc: logger.warning( f\"Create/update failed", "found for key {rule_key} on Policy {policy_key}.\", ) logger.info(f\"Deleting rule", "logger = logging.getLogger(__name__) @router.get( urls.POLICY_LIST, status_code=200, response_model=Page[schemas.PolicyResponse], dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])], )", "from fidesops.models.client import ClientDetail from fidesops.models.policy import ( ActionType, Policy,", "data provided was invalid.\", \"data\": policy_data, } failed.append(BulkUpdateFailed(**failure)) continue else:", "\"name\": schema.name, \"policy_id\": policy.id, \"storage_destination_id\": associated_storage_config_id, \"masking_strategy\": masking_strategy_data, }, )", ") -> None: \"\"\" Delete the rule target. 
\"\"\" policy", "for schema in input_data: try: target = RuleTarget.create_or_update( db=db, data={", "return schemas.BulkPutRuleTargetResponse( succeeded=created_or_updated, failed=failed, ) @router.delete( urls.RULE_TARGET_DETAIL, status_code=204, dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])],", "exist associated_storage_config_id = None if schema.action_type == ActionType.access.value: # Only", "starlette.exceptions import HTTPException from starlette.status import HTTP_404_NOT_FOUND from fidesops.api import", "for key {rule_target_key} at Rule {rule_key} on Policy {policy_key}.\", )", "schema.key, \"data_category\": schema.data_category, \"rule_id\": rule.id, \"client_id\": client.id, }, ) except", "Be sure to pass the schema out the same way", "policies with pagination params '{params}'\") policies = Policy.query(db=db) return paginate(policies,", "with key {storage_destination_key}\" ) failure = { \"message\": f\"A StorageConfig", "schemas from fidesops.schemas.api import BulkUpdateFailed from fidesops.util.oauth_util import verify_oauth_client router", ") except KeyOrNameAlreadyExists as exc: logger.warning(\"Create/update failed for policy: %s\",", "= { \"message\": exc.args[0], \"data\": policy_data, } failed.append(BulkUpdateFailed(**failure)) continue except", "Session = Depends(deps.get_db), input_data: conlist(schemas.RuleTarget, max_items=50) = Body(...), # type:", "{len(input_data)} rules on policy {policy_key}\" ) for schema in input_data:", "{exc}\" ) failure = { \"message\": exc.args[0], \"data\": dict(schema), }", "import ( ActionType, Policy, Rule, RuleTarget, ) from fidesops.models.storage import", "db=db, data={ \"name\": schema.name, \"key\": schema.key, \"data_category\": schema.data_category, \"rule_id\": rule.id,", "as scopes from fidesops.api.v1 import urn_registry as urls from fidesops.common_exceptions", "'{rule_key}'\") rule.delete(db=db) @router.patch( urls.RULE_TARGET_LIST, status_code=200, response_model=schemas.BulkPutRuleTargetResponse, ) def create_or_update_rule_targets( *,", "= Depends(deps.get_db), ) -> None: \"\"\" Delete a policy rule.", "Body, Depends, Security from fastapi_pagination import ( Page, Params, )", "ClientDetail = Security( verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE], ), policy_key: FidesOpsKey, db: Session", "import FidesOpsKey from pydantic import conlist from sqlalchemy.exc import IntegrityError", "a paginated list of all Policy records in this system", "= schema.masking_strategy.dict() try: rule = Rule.create_or_update( db=db, data={ \"action_type\": schema.action_type,", "\"\"\" policy = get_policy_or_error(db, policy_key) logger.info(f\"Finding rule with key '{rule_key}'\")", "logger.info(f\"Finding policy with key '{policy_key}'\") policy = Policy.get_by(db=db, field=\"key\", value=policy_key)", "import HTTPException from starlette.status import HTTP_404_NOT_FOUND from fidesops.api import deps", "[] failed = [] logger.info( f\"Starting bulk upsert for {len(input_data)}", "urls.POLICY_LIST, status_code=200, response_model=schemas.BulkPutPolicyResponse, ) def create_or_update_policies( *, client: ClientDetail =", "update corresponding Policy objects or report failure \"\"\" created_or_updated: List[Policy]", "FidesOpsKey, rule_key: FidesOpsKey, db: Session = Depends(deps.get_db), input_data: conlist(schemas.RuleTarget, max_items=50)", "\"message\": f\"A StorageConfig with key {storage_destination_key} does not exist\", \"data\":", "Validate all FKs in the input data exist 
associated_storage_config_id =", "or update corresponding Rule objects or report failure \"\"\" logger.info(f\"Finding", "*, policy_key: FidesOpsKey, rule_key: FidesOpsKey, rule_target_key: FidesOpsKey, db: Session =", "corresponding Rule objects or report failure \"\"\" policy = get_policy_or_error(db,", "delete_rule( *, policy_key: FidesOpsKey, rule_key: FidesOpsKey, db: Session = Depends(deps.get_db),", "exc.args[0], \"data\": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except ValueError as exc:", "%s\", exc) failure = { \"message\": exc.args[0], \"data\": policy_data, }", "= Depends(deps.get_db), ) -> schemas.PolicyResponse: \"\"\" Return a single Policy", "from fidesops.schemas.api import BulkUpdateFailed from fidesops.util.oauth_util import verify_oauth_client router =", "status_code=HTTP_404_NOT_FOUND, detail=f\"No Policy found for key {policy_key}.\", ) return policy", "Policy.create_or_update( db=db, data={ \"name\": policy_data[\"name\"], \"key\": policy_data.get(\"key\"), \"client_id\": client.id, },", "starlette.status import HTTP_404_NOT_FOUND from fidesops.api import deps from fidesops.api.v1 import", "def create_or_update_policies( *, client: ClientDetail = Security( verify_oauth_client, scopes=[scopes.POLICY_CREATE_OR_UPDATE], ),", "schemas.BulkPutPolicyResponse( succeeded=created_or_updated, failed=failed, ) @router.patch( urls.RULE_LIST, status_code=200, response_model=schemas.BulkPutRuleResponse, ) def", "as exc: logger.warning( f\"Create/update failed for rule target {schema.key} on", "Depends(deps.get_db), ) -> None: \"\"\" Delete a policy rule. \"\"\"", "failed.append(BulkUpdateFailed(**failure)) continue except ( DataCategoryNotSupported, PolicyValidationError, RuleTargetValidationError, ) as exc:", "failed=failed, ) @router.delete( urls.RULE_TARGET_DETAIL, status_code=204, dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])], ) def delete_rule_target(", "= Depends(), ) -> AbstractPage[Policy]: \"\"\" Return a paginated list", "{rule_key} on Policy {policy_key}.\", ) logger.info(f\"Deleting rule with key '{rule_key}'\")", "found for key {policy_key}.\", ) return policy @router.get( urls.POLICY_DETAIL, status_code=200,", ") def create_or_update_rules( *, client: ClientDetail = Security( verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE],", "rule_key and Rule.policy_id == policy.id) ).first() if not rule: raise", "Any] = dict(policy_schema) try: policy = Policy.create_or_update( db=db, data={ \"name\":", "on rule {rule_key}: {exc}\" ) failure = { \"message\": exc.args[0],", "ignore ) -> schemas.BulkPutRuleTargetResponse: \"\"\" Given a list of Rule", "RuleValidationError, RuleTargetValidationError, KeyOrNameAlreadyExists, ) from fidesops.models.client import ClientDetail from fidesops.models.policy", "} failed.append(BulkUpdateFailed(**failure)) continue else: created_or_updated.append(policy) return schemas.BulkPutPolicyResponse( succeeded=created_or_updated, failed=failed, )", "{policy_key}.\", ) logger.info(f\"Deleting rule with key '{rule_key}'\") rule.delete(db=db) @router.patch( urls.RULE_TARGET_LIST,", "associated_storage_config: StorageConfig = StorageConfig.get_by( db=db, field=\"key\", value=storage_destination_key, ) if not", "elements, create corresponding Rule objects or report failure \"\"\" policy", "f\"DataCategory {schema.data_category} is already specified on Rule with ID {rule.id}\",", "upsert for {len(data)} policies\") for policy_schema in data: policy_data: Dict[str,", "bulk upsert for 
{len(input_data)} rule targets on rule {rule_key}\" )", "\"storage_destination_id\": associated_storage_config_id, \"masking_strategy\": masking_strategy_data, }, ) except KeyOrNameAlreadyExists as exc:", "fastapi_pagination.bases import AbstractPage from fastapi_pagination.ext.sqlalchemy import paginate from fidesops.schemas.shared_schemas import", "} failed.append(BulkUpdateFailed(**failure)) continue except ( DataCategoryNotSupported, PolicyValidationError, RuleTargetValidationError, ) as", "dict(schema), } failed.append(BulkUpdateFailed(**failure)) else: created_or_updated.append(target) return schemas.BulkPutRuleTargetResponse( succeeded=created_or_updated, failed=failed, )", "db=db, data={ \"action_type\": schema.action_type, \"client_id\": client.id, \"key\": schema.key, \"name\": schema.name,", "fidesops.schemas.shared_schemas import FidesOpsKey from pydantic import conlist from sqlalchemy.exc import", "raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f\"No Rule found for key {rule_key} on", "scope_registry as scopes from fidesops.api.v1 import urn_registry as urls from", ") logger.info(f\"Finding rule target with key '{rule_target_key}'\") target = RuleTarget.filter(", "= Security( verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE], ), policy_key: FidesOpsKey, db: Session =", "records in this system \"\"\" logger.info(f\"Finding all policies with pagination", "= RuleTarget.filter( db=db, conditions=( RuleTarget.key == rule_target_key and RuleTarget.rule_id ==", "was invalid.\", \"data\": policy_data, } failed.append(BulkUpdateFailed(**failure)) continue else: created_or_updated.append(policy) return", "rule with key '{rule_key}'\") rule = Rule.filter( db=db, conditions=(Rule.key ==", "paginated list of all Policy records in this system \"\"\"", "dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])], ) def get_policy( *, policy_key: FidesOpsKey, db: Session", "{ \"message\": f\"A StorageConfig with key {storage_destination_key} does not exist\",", "failed = [] logger.info( f\"Starting bulk upsert for {len(input_data)} rule", "status_code=204, dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])], ) def delete_rule( *, policy_key: FidesOpsKey, rule_key:", "is already specified on Rule with ID {rule.id}\", \"data\": dict(schema),", "# Validate all FKs in the input data exist associated_storage_config_id", "FidesOpsKey, rule_target_key: FidesOpsKey, db: Session = Depends(deps.get_db), ) -> None:", "for {len(data)} policies\") for policy_schema in data: policy_data: Dict[str, Any]", "dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except ValueError as exc: logger.warning( f\"Create/update", "PolicyValidationError, RuleTargetValidationError, ) as exc: logger.warning( f\"Create/update failed for rule", "max_items=50) = Body(...), # type: ignore ) -> schemas.BulkPutRuleResponse: \"\"\"", "= dict(policy_schema) try: policy = Policy.create_or_update( db=db, data={ \"name\": policy_data[\"name\"],", "logger.warning( f\"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}\"", "import APIRouter, Body, Depends, Security from fastapi_pagination import ( Page," ]
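The entries in the list above and the one that follows read as fixed-width, whitespace-token windows slid over Python source files. Below is a minimal sketch of how such overlapping n-grams could be generated; the window size of 10, the function name word_ngrams, and the sample text are illustrative assumptions, not details taken from this dataset's actual build pipeline.

    # Sketch only: produce every n-token window of a text, splitting on whitespace.
    # Window size and naming are assumptions for illustration.
    from typing import List

    def word_ngrams(text: str, n: int = 10) -> List[str]:
        """Return each n-token window of `text`, tokens split on whitespace."""
        tokens = text.split()
        if len(tokens) < n:
            # Shorter inputs yield at most one (possibly empty) window.
            return [" ".join(tokens)] if tokens else []
        return [" ".join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]

    if __name__ == "__main__":
        sample = "import emoji import sentiment_analysis.src.report.cons_report as cons"
        for gram in word_ngrams(sample, n=4):
            print(gram)

Under this assumption, consecutive entries share n - 1 tokens, which would account for the heavy overlap visible between the quoted strings in each list.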
[ "item_analyze[2] temp = {} temp.update(id=topic_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topics.append(temp) self.table_topics", "read_json_file(\"en_US.json\") self.image_base64_sr = None self.image_base64_topics = None def sort_by_dimension_sentiment_table(self) ->", "\"\"\" entities = ClientsLanguageSentiment.count_entities(entities=entities) entities = ClientsLanguageSentiment.filter_black_list(entities=entities) return entities def", "dimension=item_analyze[0], week=company_week) dimension = extract_dimension(self.info_file, dimension=item_analyze[0]) comment = item_analyze[1] sentiment", "insert_to_list_topic_comments(self, features: list) -> None: \"\"\" Create array with the", "surveys: dict, company_id: str, weeks: list, g_client: ClientsLanguageSentiment, api_source_manager: APISourcesFetcher):", "self.company_id = company_id self.weeks = weeks self.g_client = g_client self.api_source_manager", "= item_analyze[1] sentiment = item_analyze[2] temp = {} temp.update(dimension=dimension) temp.update(question=question)", "heading topic_headings = nested_lookup(global_cons.TOPIC_CONTENT, topics) topic_headings_sentiments = nested_lookup(global_cons.TOPIC_SENTIMENT, topics) topic_ids", "sentiment_analysis.src.report.cons_report as cons import sentiment_analysis.src.constants as global_cons from utils.data_connection.api_data_manager import", "item_analyze[0] comment = item_analyze[1] sentiment = item_analyze[2] temp = {}", "= words_clouds(self.counter_text_topics, cons.path_image_topics_wc) @staticmethod def __count_filter_keys(entities: list) -> object: \"\"\"", "nested_lookup(global_cons.SR_DIMENSION, periods) sr_content = nested_lookup(global_cons.SR_CONTENT, periods) sr_sentiment = nested_lookup(global_cons.SENTIMENT, periods)", "= nested_lookup(global_cons.TOPIC_CONTENT, topics) topic_headings_sentiments = nested_lookup(global_cons.TOPIC_SENTIMENT, topics) topic_ids = list(topics.keys())", "None: \"\"\" Create array with the dictionary for interface -", "-> None: \"\"\" Process the surveys replies :return: \"\"\" for", "sentiment = item_analyze[2] temp = {} temp.update(id=topic_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment)", "dimension and by sentiment :return: \"\"\" temp_table = [] for", "comments :param features: list of features to extract :return: \"\"\"", "temp = {} temp.update(id=topic_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topics.append(temp) self.table_topics =", "interface - referenced to topic headlines :param features: list of", "main words :return: \"\"\" self.image_base64_sr = words_clouds(self.counter_text_sr, cons.path_image_sr_wc) self.image_base64_topics =", "sentiment_analysis.src.constants as global_cons from utils.data_connection.api_data_manager import APISourcesFetcher from utils.utilities import", "= item_analyze[1] sentiment = item_analyze[2] temp = {} temp.update(id=topic_id_comment_id) temp.update(comment=emoji.emojize(comment,", "= self.api_source_manager.get_company_week_from_period(week=period_parts[0], year=period_parts[1], company_id=self.company_id) sr_dimension = nested_lookup(global_cons.SR_DIMENSION, periods) sr_content =", "g_client: ClientsLanguageSentiment, api_source_manager: APISourcesFetcher): self.topics = topics self.surveys = surveys", ":return: \"\"\" for item_analyze in features: topic_id_comment_id = item_analyze[0] comment", "import 
sentiment_analysis.src.constants as global_cons from utils.data_connection.api_data_manager import APISourcesFetcher from utils.utilities", "in features: question = extract_question(self.info_file, dimension=item_analyze[0], week=company_week) dimension = extract_dimension(self.info_file,", "global_cons from utils.data_connection.api_data_manager import APISourcesFetcher from utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG,", "Create wordcloud of the main words :return: \"\"\" self.image_base64_sr =", "api_source_manager self.thresholds = () self.table_surveys_replies = [] self.table_topics = []", "[d for d in self.table_surveys_replies if d['dimension'] == dimension] temp", "self.topics.items(): # heading topic_headings = nested_lookup(global_cons.TOPIC_CONTENT, topics) topic_headings_sentiments = nested_lookup(global_cons.TOPIC_SENTIMENT,", "words_clouds(self.counter_text_sr, cons.path_image_sr_wc) self.image_base64_topics = words_clouds(self.counter_text_topics, cons.path_image_topics_wc) @staticmethod def __count_filter_keys(entities: list)", "item_analyze[1] sentiment = item_analyze[2] temp = {} temp.update(dimension=dimension) temp.update(question=question) temp.update(comment=emoji.emojize(comment,", "cons.dimensions: temp = [d for d in self.table_surveys_replies if d['dimension']", "temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topic_comment.append(temp) self.table_topic_comment = sorted(self.table_topic_comment, key=lambda k: k['sentiment'],", "APISourcesFetcher): self.topics = topics self.surveys = surveys self.company_id = company_id", "self.table_surveys_replies.append(temp) self.sort_by_dimension_sentiment_table() def insert_to_list_topics(self, features: list) -> None: \"\"\" Create", "CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question from sentiment_analysis.src.word_cloud import words_clouds from sentiment_analysis.src.clients_language_sentiments_entity import", ":return: \"\"\" for company_id, periods in self.surveys.items(): for period in", "= period.split(CUSTOM_YEAR_WEEK_AGG) translations_week = self.api_source_manager.get_company_week_from_period(week=period_parts[0], year=period_parts[1], company_id=self.company_id) sr_dimension = nested_lookup(global_cons.SR_DIMENSION,", "\"\"\" for company_id, topics in self.topics.items(): # heading topic_headings =", "topic) topic_list_ids = [topic_id] * len(topic_comments) topic_w_scores = list(zip(topic_list_ids, topic_comments,", "self.weeks: period_parts = period.split(CUSTOM_YEAR_WEEK_AGG) translations_week = self.api_source_manager.get_company_week_from_period(week=period_parts[0], year=period_parts[1], company_id=self.company_id) sr_dimension", "<gh_stars>0 import emoji import sentiment_analysis.src.report.cons_report as cons import sentiment_analysis.src.constants as", "topic_list_ids = [topic_id] * len(topic_comments) topic_w_scores = list(zip(topic_list_ids, topic_comments, topic_comments_scores))", "features: topic_id_comment_id = item_analyze[0] comment = item_analyze[1] sentiment = item_analyze[2]", "ClientsLanguageSentiment.count_entities(entities) def process_interface(self) -> None: \"\"\" Take the info needed", "dimension] temp = sorted(temp, key=lambda k: k['sentiment'], reverse=True) temp_table.extend(temp) self.table_surveys_replies", "features to extract :return: \"\"\" for item_analyze in features: topic_id", "import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question from sentiment_analysis.src.word_cloud 
import words_clouds from", "interface :param features: list of features to extract :param company_week:", "self.image_base64_topics = words_clouds(self.counter_text_topics, cons.path_image_topics_wc) @staticmethod def __count_filter_keys(entities: list) -> object:", "None: \"\"\" Process the surveys replies :return: \"\"\" for company_id,", "def word_cloud(self): \"\"\" Create wordcloud of the main words :return:", "nested_lookup(global_cons.SR_ENTITIES, periods) sr_comment_score = list(zip(sr_dimension, sr_content, sr_sentiment)) self.insert_to_list_surveys_replies(sr_comment_score, company_week=translations_week) self.counter_text_sr", "sr_content = nested_lookup(global_cons.SR_CONTENT, periods) sr_sentiment = nested_lookup(global_cons.SENTIMENT, periods) sr_entities =", "with the dictionary for interface :param features: list of features", "for interface - referenced to topic comments :param features: list", "k['sentiment'], reverse=True) temp_table.extend(temp) self.table_surveys_replies = temp_table def insert_to_list_surveys_replies(self, features: list,", "topic_comments_scores)) self.insert_to_list_topic_comments(topic_w_scores) entities = nested_lookup(global_cons.TOPIC_ENTITIES, topics) self.counter_text_topics = ClientsLanguageSentiment.count_entities(entities) def", "to extract :return: \"\"\" for item_analyze in features: topic_id =", "return entities def __process_sr(self) -> None: \"\"\" Process the surveys", "= item_analyze[2] temp = {} temp.update(id=topic_id_comment_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topic_comment.append(temp)", "self.insert_to_list_surveys_replies(sr_comment_score, company_week=translations_week) self.counter_text_sr = self.__count_filter_keys(entities=sr_entities) def __process_topics(self) -> None: \"\"\"", "def process_interface(self) -> None: \"\"\" Take the info needed to", "in features: topic_id = item_analyze[0] comment = item_analyze[1] sentiment =", "array with the dictionary for interface :param features: list of", "topic in topics.items(): topic_comments = nested_lookup(global_cons.TOPIC_COMMENT, topic) topic_comments_scores = nested_lookup(global_cons.TOPIC_COMMENT_SENTIMENT,", "[] for dimension in cons.dimensions: temp = [d for d", ":return: \"\"\" temp_table = [] for dimension in cons.dimensions: temp", "= nested_lookup(global_cons.TOPIC_COMMENT, topic) topic_comments_scores = nested_lookup(global_cons.TOPIC_COMMENT_SENTIMENT, topic) topic_list_ids = [topic_id]", "= {} temp.update(id=topic_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topics.append(temp) self.table_topics = sorted(self.table_topics,", "= sorted(self.table_topics, key=lambda k: k['sentiment'], reverse=True) def insert_to_list_topic_comments(self, features: list)", "self.table_topic_comment = sorted(self.table_topic_comment, key=lambda k: k['sentiment'], reverse=True) def word_cloud(self): \"\"\"", "surveys replies :return: \"\"\" for company_id, periods in self.surveys.items(): for", "dict, surveys: dict, company_id: str, weeks: list, g_client: ClientsLanguageSentiment, api_source_manager:", "to extract :return: \"\"\" for item_analyze in features: topic_id_comment_id =", "list of entities text :return: \"\"\" entities = ClientsLanguageSentiment.count_entities(entities=entities) entities", "list of features to extract :param company_week: company week of", "by sentiment :return: \"\"\" temp_table = [] for dimension in", "= item_analyze[2] 
temp = {} temp.update(id=topic_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topics.append(temp)", "topics in self.topics.items(): # heading topic_headings = nested_lookup(global_cons.TOPIC_CONTENT, topics) topic_headings_sentiments", "of features to extract :param company_week: company week of the", "entities text :return: \"\"\" entities = ClientsLanguageSentiment.count_entities(entities=entities) entities = ClientsLanguageSentiment.filter_black_list(entities=entities)", ":return: \"\"\" self.image_base64_sr = words_clouds(self.counter_text_sr, cons.path_image_sr_wc) self.image_base64_topics = words_clouds(self.counter_text_topics, cons.path_image_topics_wc)", "= self.__count_filter_keys(entities=sr_entities) def __process_topics(self) -> None: \"\"\" Process the topics", "self.table_surveys_replies if d['dimension'] == dimension] temp = sorted(temp, key=lambda k:", "str, weeks: list, g_client: ClientsLanguageSentiment, api_source_manager: APISourcesFetcher): self.topics = topics", "== dimension] temp = sorted(temp, key=lambda k: k['sentiment'], reverse=True) temp_table.extend(temp)", "company_id, periods in self.surveys.items(): for period in self.weeks: period_parts =", "entities = ClientsLanguageSentiment.filter_black_list(entities=entities) return entities def __process_sr(self) -> None: \"\"\"", "temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_surveys_replies.append(temp) self.sort_by_dimension_sentiment_table() def insert_to_list_topics(self, features: list) ->", "nested_lookup(global_cons.TOPIC_CONTENT, topics) topic_headings_sentiments = nested_lookup(global_cons.TOPIC_SENTIMENT, topics) topic_ids = list(topics.keys()) topic_w_sentiments", "self.counter_text_topics = None self.info_file = read_json_file(\"en_US.json\") self.image_base64_sr = None self.image_base64_topics", "list) -> object: \"\"\" Count and filter keys :param entities:", "import words_clouds from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment from nested_lookup import nested_lookup", "nested_lookup(global_cons.SR_CONTENT, periods) sr_sentiment = nested_lookup(global_cons.SENTIMENT, periods) sr_entities = nested_lookup(global_cons.SR_ENTITIES, periods)", "topics) topic_headings_sentiments = nested_lookup(global_cons.TOPIC_SENTIMENT, topics) topic_ids = list(topics.keys()) topic_w_sentiments =", "of entities text :return: \"\"\" entities = ClientsLanguageSentiment.count_entities(entities=entities) entities =", "= list(zip(topic_ids, topic_headings, topic_headings_sentiments)) self.insert_to_list_topics(topic_w_sentiments) # comments for topic_id, topic", "int) -> None: \"\"\" Create array with the dictionary for", "features: list of features to extract :param company_week: company week", "None self.info_file = read_json_file(\"en_US.json\") self.image_base64_sr = None self.image_base64_topics = None", "self.info_file = read_json_file(\"en_US.json\") self.image_base64_sr = None self.image_base64_topics = None def", "nested_lookup(global_cons.TOPIC_COMMENT, topic) topic_comments_scores = nested_lookup(global_cons.TOPIC_COMMENT_SENTIMENT, topic) topic_list_ids = [topic_id] *", "with the dictionary for interface - referenced to topic headlines", "temp.update(id=topic_id_comment_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topic_comment.append(temp) self.table_topic_comment = 
sorted(self.table_topic_comment, key=lambda k:", "referenced to topic headlines :param features: list of features to", "dict, company_id: str, weeks: list, g_client: ClientsLanguageSentiment, api_source_manager: APISourcesFetcher): self.topics", "= read_json_file(\"en_US.json\") self.image_base64_sr = None self.image_base64_topics = None def sort_by_dimension_sentiment_table(self)", "use_aliases=True)) temp.update(sentiment=sentiment) self.table_surveys_replies.append(temp) self.sort_by_dimension_sentiment_table() def insert_to_list_topics(self, features: list) -> None:", "for item_analyze in features: question = extract_question(self.info_file, dimension=item_analyze[0], week=company_week) dimension", "self.g_client = g_client self.api_source_manager = api_source_manager self.thresholds = () self.table_surveys_replies", "= None def sort_by_dimension_sentiment_table(self) -> None: \"\"\" Sort by dimension", "self.api_source_manager.get_company_week_from_period(week=period_parts[0], year=period_parts[1], company_id=self.company_id) sr_dimension = nested_lookup(global_cons.SR_DIMENSION, periods) sr_content = nested_lookup(global_cons.SR_CONTENT,", "comment = item_analyze[1] sentiment = item_analyze[2] temp = {} temp.update(id=topic_id)", "k: k['sentiment'], reverse=True) def insert_to_list_topic_comments(self, features: list) -> None: \"\"\"", "= [] self.counter_text_sr = None self.counter_text_topics = None self.info_file =", "self.table_topics = sorted(self.table_topics, key=lambda k: k['sentiment'], reverse=True) def insert_to_list_topic_comments(self, features:", "comment = item_analyze[1] sentiment = item_analyze[2] temp = {} temp.update(id=topic_id_comment_id)", "None self.image_base64_topics = None def sort_by_dimension_sentiment_table(self) -> None: \"\"\" Sort", "topics: dict, surveys: dict, company_id: str, weeks: list, g_client: ClientsLanguageSentiment,", "def sort_by_dimension_sentiment_table(self) -> None: \"\"\" Sort by dimension and by", "sr_dimension = nested_lookup(global_cons.SR_DIMENSION, periods) sr_content = nested_lookup(global_cons.SR_CONTENT, periods) sr_sentiment =", "topic_w_sentiments = list(zip(topic_ids, topic_headings, topic_headings_sentiments)) self.insert_to_list_topics(topic_w_sentiments) # comments for topic_id,", "topic_headings_sentiments = nested_lookup(global_cons.TOPIC_SENTIMENT, topics) topic_ids = list(topics.keys()) topic_w_sentiments = list(zip(topic_ids,", "sr_entities = nested_lookup(global_cons.SR_ENTITIES, periods) sr_comment_score = list(zip(sr_dimension, sr_content, sr_sentiment)) self.insert_to_list_surveys_replies(sr_comment_score,", "entities def __process_sr(self) -> None: \"\"\" Process the surveys replies", "read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question from sentiment_analysis.src.word_cloud import words_clouds from sentiment_analysis.src.clients_language_sentiments_entity", "ClientsLanguageSentiment from nested_lookup import nested_lookup class InterFaceReport: def __init__(self, topics:", "\"\"\" Sort by dimension and by sentiment :return: \"\"\" temp_table", "use_aliases=True)) temp.update(sentiment=sentiment) self.table_topics.append(temp) self.table_topics = sorted(self.table_topics, key=lambda k: k['sentiment'], reverse=True)", "\"\"\" Count and filter keys :param entities: list of entities", "g_client self.api_source_manager = api_source_manager self.thresholds = () self.table_surveys_replies = []", "= weeks self.g_client = g_client self.api_source_manager = api_source_manager self.thresholds =", 
"None: \"\"\" Create array with the dictionary for interface :param", "and filter keys :param entities: list of entities text :return:", "self.table_topics = [] self.table_topic_comment = [] self.counter_text_sr = None self.counter_text_topics", "company_week: company week of the company :return: \"\"\" for item_analyze", "sr_comment_score = list(zip(sr_dimension, sr_content, sr_sentiment)) self.insert_to_list_surveys_replies(sr_comment_score, company_week=translations_week) self.counter_text_sr = self.__count_filter_keys(entities=sr_entities)", "= ClientsLanguageSentiment.count_entities(entities=entities) entities = ClientsLanguageSentiment.filter_black_list(entities=entities) return entities def __process_sr(self) ->", "topic_headings = nested_lookup(global_cons.TOPIC_CONTENT, topics) topic_headings_sentiments = nested_lookup(global_cons.TOPIC_SENTIMENT, topics) topic_ids =", "Count and filter keys :param entities: list of entities text", "cons.path_image_topics_wc) @staticmethod def __count_filter_keys(entities: list) -> object: \"\"\" Count and", "{} temp.update(dimension=dimension) temp.update(question=question) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_surveys_replies.append(temp) self.sort_by_dimension_sentiment_table() def insert_to_list_topics(self,", "None: \"\"\" Take the info needed to write into report_pdf", "sorted(self.table_topic_comment, key=lambda k: k['sentiment'], reverse=True) def word_cloud(self): \"\"\" Create wordcloud", "[] self.table_topics = [] self.table_topic_comment = [] self.counter_text_sr = None", "wordcloud of the main words :return: \"\"\" self.image_base64_sr = words_clouds(self.counter_text_sr,", "__init__(self, topics: dict, surveys: dict, company_id: str, weeks: list, g_client:", "-> None: \"\"\" Sort by dimension and by sentiment :return:", "of features to extract :return: \"\"\" for item_analyze in features:", "in self.surveys.items(): for period in self.weeks: period_parts = period.split(CUSTOM_YEAR_WEEK_AGG) translations_week", "temp = [d for d in self.table_surveys_replies if d['dimension'] ==", "def insert_to_list_surveys_replies(self, features: list, company_week: int) -> None: \"\"\" Create", "periods) sr_content = nested_lookup(global_cons.SR_CONTENT, periods) sr_sentiment = nested_lookup(global_cons.SENTIMENT, periods) sr_entities", "None self.counter_text_topics = None self.info_file = read_json_file(\"en_US.json\") self.image_base64_sr = None", "words :return: \"\"\" self.image_base64_sr = words_clouds(self.counter_text_sr, cons.path_image_sr_wc) self.image_base64_topics = words_clouds(self.counter_text_topics,", "Process the topics :return: \"\"\" for company_id, topics in self.topics.items():", "self.insert_to_list_topics(topic_w_sentiments) # comments for topic_id, topic in topics.items(): topic_comments =", "topic headlines :param features: list of features to extract :return:", "topic comments :param features: list of features to extract :return:", "temp.update(id=topic_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topics.append(temp) self.table_topics = sorted(self.table_topics, key=lambda k:", "api_source_manager: APISourcesFetcher): self.topics = topics self.surveys = surveys self.company_id =", "temp_table def insert_to_list_surveys_replies(self, features: list, company_week: int) -> None: \"\"\"", "company_id, topics in self.topics.items(): # heading topic_headings = 
nested_lookup(global_cons.TOPIC_CONTENT, topics)", "= nested_lookup(global_cons.SR_ENTITIES, periods) sr_comment_score = list(zip(sr_dimension, sr_content, sr_sentiment)) self.insert_to_list_surveys_replies(sr_comment_score, company_week=translations_week)", "dimension=item_analyze[0]) comment = item_analyze[1] sentiment = item_analyze[2] temp = {}", "list) -> None: \"\"\" Create array with the dictionary for", "extract_question(self.info_file, dimension=item_analyze[0], week=company_week) dimension = extract_dimension(self.info_file, dimension=item_analyze[0]) comment = item_analyze[1]", "features: list) -> None: \"\"\" Create array with the dictionary", "the dictionary for interface - referenced to topic comments :param", "None: \"\"\" Sort by dimension and by sentiment :return: \"\"\"", "= nested_lookup(global_cons.TOPIC_ENTITIES, topics) self.counter_text_topics = ClientsLanguageSentiment.count_entities(entities) def process_interface(self) -> None:", "k: k['sentiment'], reverse=True) def word_cloud(self): \"\"\" Create wordcloud of the", "from utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question from sentiment_analysis.src.word_cloud import", "company_id=self.company_id) sr_dimension = nested_lookup(global_cons.SR_DIMENSION, periods) sr_content = nested_lookup(global_cons.SR_CONTENT, periods) sr_sentiment", "self.table_topic_comment = [] self.counter_text_sr = None self.counter_text_topics = None self.info_file", "sorted(temp, key=lambda k: k['sentiment'], reverse=True) temp_table.extend(temp) self.table_surveys_replies = temp_table def", "-> None: \"\"\" Take the info needed to write into", "topics) topic_ids = list(topics.keys()) topic_w_sentiments = list(zip(topic_ids, topic_headings, topic_headings_sentiments)) self.insert_to_list_topics(topic_w_sentiments)", "list of features to extract :return: \"\"\" for item_analyze in", "with the dictionary for interface - referenced to topic comments", "Sort by dimension and by sentiment :return: \"\"\" temp_table =", "= nested_lookup(global_cons.SENTIMENT, periods) sr_entities = nested_lookup(global_cons.SR_ENTITIES, periods) sr_comment_score = list(zip(sr_dimension,", "in topics.items(): topic_comments = nested_lookup(global_cons.TOPIC_COMMENT, topic) topic_comments_scores = nested_lookup(global_cons.TOPIC_COMMENT_SENTIMENT, topic)", "object: \"\"\" Count and filter keys :param entities: list of", "[] self.table_topic_comment = [] self.counter_text_sr = None self.counter_text_topics = None", "def __process_topics(self) -> None: \"\"\" Process the topics :return: \"\"\"", "= [topic_id] * len(topic_comments) topic_w_scores = list(zip(topic_list_ids, topic_comments, topic_comments_scores)) self.insert_to_list_topic_comments(topic_w_scores)", "list, company_week: int) -> None: \"\"\" Create array with the", "self.insert_to_list_topic_comments(topic_w_scores) entities = nested_lookup(global_cons.TOPIC_ENTITIES, topics) self.counter_text_topics = ClientsLanguageSentiment.count_entities(entities) def process_interface(self)", "features: topic_id = item_analyze[0] comment = item_analyze[1] sentiment = item_analyze[2]", "extract_question from sentiment_analysis.src.word_cloud import words_clouds from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment from", "\"\"\" for item_analyze in features: topic_id_comment_id = item_analyze[0] comment =", "nested_lookup(global_cons.TOPIC_SENTIMENT, topics) topic_ids = list(topics.keys()) topic_w_sentiments = list(zip(topic_ids, 
topic_headings, topic_headings_sentiments))", "= None self.counter_text_topics = None self.info_file = read_json_file(\"en_US.json\") self.image_base64_sr =", "company_week: int) -> None: \"\"\" Create array with the dictionary", "= company_id self.weeks = weeks self.g_client = g_client self.api_source_manager =", "d in self.table_surveys_replies if d['dimension'] == dimension] temp = sorted(temp,", "= sorted(temp, key=lambda k: k['sentiment'], reverse=True) temp_table.extend(temp) self.table_surveys_replies = temp_table", "the surveys replies :return: \"\"\" for company_id, periods in self.surveys.items():", "= list(topics.keys()) topic_w_sentiments = list(zip(topic_ids, topic_headings, topic_headings_sentiments)) self.insert_to_list_topics(topic_w_sentiments) # comments", "* len(topic_comments) topic_w_scores = list(zip(topic_list_ids, topic_comments, topic_comments_scores)) self.insert_to_list_topic_comments(topic_w_scores) entities =", "list(zip(topic_list_ids, topic_comments, topic_comments_scores)) self.insert_to_list_topic_comments(topic_w_scores) entities = nested_lookup(global_cons.TOPIC_ENTITIES, topics) self.counter_text_topics =", "topic) topic_comments_scores = nested_lookup(global_cons.TOPIC_COMMENT_SENTIMENT, topic) topic_list_ids = [topic_id] * len(topic_comments)", "self.__count_filter_keys(entities=sr_entities) def __process_topics(self) -> None: \"\"\" Process the topics :return:", "use_aliases=True)) temp.update(sentiment=sentiment) self.table_topic_comment.append(temp) self.table_topic_comment = sorted(self.table_topic_comment, key=lambda k: k['sentiment'], reverse=True)", "temp = sorted(temp, key=lambda k: k['sentiment'], reverse=True) temp_table.extend(temp) self.table_surveys_replies =", "for company_id, periods in self.surveys.items(): for period in self.weeks: period_parts", "topic_id, topic in topics.items(): topic_comments = nested_lookup(global_cons.TOPIC_COMMENT, topic) topic_comments_scores =", "dimension in cons.dimensions: temp = [d for d in self.table_surveys_replies", "def insert_to_list_topic_comments(self, features: list) -> None: \"\"\" Create array with", "periods) sr_entities = nested_lookup(global_cons.SR_ENTITIES, periods) sr_comment_score = list(zip(sr_dimension, sr_content, sr_sentiment))", "insert_to_list_topics(self, features: list) -> None: \"\"\" Create array with the", "topic_comments = nested_lookup(global_cons.TOPIC_COMMENT, topic) topic_comments_scores = nested_lookup(global_cons.TOPIC_COMMENT_SENTIMENT, topic) topic_list_ids =", "= extract_question(self.info_file, dimension=item_analyze[0], week=company_week) dimension = extract_dimension(self.info_file, dimension=item_analyze[0]) comment =", "the info needed to write into report_pdf :return: \"\"\" self.__process_sr()", "= temp_table def insert_to_list_surveys_replies(self, features: list, company_week: int) -> None:", "temp_table = [] for dimension in cons.dimensions: temp = [d", "features: list of features to extract :return: \"\"\" for item_analyze", "# heading topic_headings = nested_lookup(global_cons.TOPIC_CONTENT, topics) topic_headings_sentiments = nested_lookup(global_cons.TOPIC_SENTIMENT, topics)", "= {} temp.update(dimension=dimension) temp.update(question=question) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_surveys_replies.append(temp) self.sort_by_dimension_sentiment_table() def", "import nested_lookup class InterFaceReport: def __init__(self, topics: dict, surveys: dict,", "company :return: \"\"\" for 
item_analyze in features: question = extract_question(self.info_file,", "@staticmethod def __count_filter_keys(entities: list) -> object: \"\"\" Count and filter", "-> None: \"\"\" Process the topics :return: \"\"\" for company_id,", "self.topics = topics self.surveys = surveys self.company_id = company_id self.weeks", "= item_analyze[2] temp = {} temp.update(dimension=dimension) temp.update(question=question) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment)", "the dictionary for interface - referenced to topic headlines :param", "\"\"\" for item_analyze in features: topic_id = item_analyze[0] comment =", "\"\"\" self.image_base64_sr = words_clouds(self.counter_text_sr, cons.path_image_sr_wc) self.image_base64_topics = words_clouds(self.counter_text_topics, cons.path_image_topics_wc) @staticmethod", "reverse=True) def word_cloud(self): \"\"\" Create wordcloud of the main words", "periods) sr_sentiment = nested_lookup(global_cons.SENTIMENT, periods) sr_entities = nested_lookup(global_cons.SR_ENTITIES, periods) sr_comment_score", "-> object: \"\"\" Count and filter keys :param entities: list", "nested_lookup class InterFaceReport: def __init__(self, topics: dict, surveys: dict, company_id:", "key=lambda k: k['sentiment'], reverse=True) temp_table.extend(temp) self.table_surveys_replies = temp_table def insert_to_list_surveys_replies(self,", "dictionary for interface :param features: list of features to extract", "item_analyze[2] temp = {} temp.update(id=topic_id_comment_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topic_comment.append(temp) self.table_topic_comment", "self.image_base64_sr = words_clouds(self.counter_text_sr, cons.path_image_sr_wc) self.image_base64_topics = words_clouds(self.counter_text_topics, cons.path_image_topics_wc) @staticmethod def", "period.split(CUSTOM_YEAR_WEEK_AGG) translations_week = self.api_source_manager.get_company_week_from_period(week=period_parts[0], year=period_parts[1], company_id=self.company_id) sr_dimension = nested_lookup(global_cons.SR_DIMENSION, periods)", "- referenced to topic comments :param features: list of features", "import emoji import sentiment_analysis.src.report.cons_report as cons import sentiment_analysis.src.constants as global_cons", "comment = item_analyze[1] sentiment = item_analyze[2] temp = {} temp.update(dimension=dimension)", "self.table_topic_comment.append(temp) self.table_topic_comment = sorted(self.table_topic_comment, key=lambda k: k['sentiment'], reverse=True) def word_cloud(self):", "= item_analyze[0] comment = item_analyze[1] sentiment = item_analyze[2] temp =", "list(zip(sr_dimension, sr_content, sr_sentiment)) self.insert_to_list_surveys_replies(sr_comment_score, company_week=translations_week) self.counter_text_sr = self.__count_filter_keys(entities=sr_entities) def __process_topics(self)", "temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topics.append(temp) self.table_topics = sorted(self.table_topics, key=lambda k: k['sentiment'],", "cons.path_image_sr_wc) self.image_base64_topics = words_clouds(self.counter_text_topics, cons.path_image_topics_wc) @staticmethod def __count_filter_keys(entities: list) ->", "surveys self.company_id = company_id self.weeks = weeks self.g_client = g_client", "the company :return: \"\"\" for item_analyze in features: question =", "info needed to write into report_pdf :return: \"\"\" self.__process_sr() 
self.__process_topics()", "entities = ClientsLanguageSentiment.count_entities(entities=entities) entities = ClientsLanguageSentiment.filter_black_list(entities=entities) return entities def __process_sr(self)", "temp.update(sentiment=sentiment) self.table_topics.append(temp) self.table_topics = sorted(self.table_topics, key=lambda k: k['sentiment'], reverse=True) def", "sentiment = item_analyze[2] temp = {} temp.update(dimension=dimension) temp.update(question=question) temp.update(comment=emoji.emojize(comment, use_aliases=True))", "self.sort_by_dimension_sentiment_table() def insert_to_list_topics(self, features: list) -> None: \"\"\" Create array", "for interface - referenced to topic headlines :param features: list", "sr_sentiment)) self.insert_to_list_surveys_replies(sr_comment_score, company_week=translations_week) self.counter_text_sr = self.__count_filter_keys(entities=sr_entities) def __process_topics(self) -> None:", "temp.update(dimension=dimension) temp.update(question=question) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_surveys_replies.append(temp) self.sort_by_dimension_sentiment_table() def insert_to_list_topics(self, features:", "= words_clouds(self.counter_text_sr, cons.path_image_sr_wc) self.image_base64_topics = words_clouds(self.counter_text_topics, cons.path_image_topics_wc) @staticmethod def __count_filter_keys(entities:", "\"\"\" Create array with the dictionary for interface - referenced", "self.image_base64_sr = None self.image_base64_topics = None def sort_by_dimension_sentiment_table(self) -> None:", "def __count_filter_keys(entities: list) -> object: \"\"\" Count and filter keys", "None def sort_by_dimension_sentiment_table(self) -> None: \"\"\" Sort by dimension and", "periods) sr_comment_score = list(zip(sr_dimension, sr_content, sr_sentiment)) self.insert_to_list_surveys_replies(sr_comment_score, company_week=translations_week) self.counter_text_sr =", "topic_headings_sentiments)) self.insert_to_list_topics(topic_w_sentiments) # comments for topic_id, topic in topics.items(): topic_comments", "Create array with the dictionary for interface :param features: list", "replies :return: \"\"\" for company_id, periods in self.surveys.items(): for period", "for dimension in cons.dimensions: temp = [d for d in", "entities = nested_lookup(global_cons.TOPIC_ENTITIES, topics) self.counter_text_topics = ClientsLanguageSentiment.count_entities(entities) def process_interface(self) ->", "nested_lookup import nested_lookup class InterFaceReport: def __init__(self, topics: dict, surveys:", "{} temp.update(id=topic_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topics.append(temp) self.table_topics = sorted(self.table_topics, key=lambda", "ClientsLanguageSentiment, api_source_manager: APISourcesFetcher): self.topics = topics self.surveys = surveys self.company_id", "company week of the company :return: \"\"\" for item_analyze in", "extract :return: \"\"\" for item_analyze in features: topic_id_comment_id = item_analyze[0]", "reverse=True) def insert_to_list_topic_comments(self, features: list) -> None: \"\"\" Create array", "extract :return: \"\"\" for item_analyze in features: topic_id = item_analyze[0]", ":param company_week: company week of the company :return: \"\"\" for", ":return: \"\"\" for company_id, topics in self.topics.items(): # heading topic_headings", "sentiment_analysis.src.clients_language_sentiments_entity import 
ClientsLanguageSentiment from nested_lookup import nested_lookup class InterFaceReport: def", "referenced to topic comments :param features: list of features to", "item_analyze in features: question = extract_question(self.info_file, dimension=item_analyze[0], week=company_week) dimension =", "ClientsLanguageSentiment.count_entities(entities=entities) entities = ClientsLanguageSentiment.filter_black_list(entities=entities) return entities def __process_sr(self) -> None:", "[] self.counter_text_sr = None self.counter_text_topics = None self.info_file = read_json_file(\"en_US.json\")", "topic_w_scores = list(zip(topic_list_ids, topic_comments, topic_comments_scores)) self.insert_to_list_topic_comments(topic_w_scores) entities = nested_lookup(global_cons.TOPIC_ENTITIES, topics)", "for company_id, topics in self.topics.items(): # heading topic_headings = nested_lookup(global_cons.TOPIC_CONTENT,", "entities: list of entities text :return: \"\"\" entities = ClientsLanguageSentiment.count_entities(entities=entities)", "for period in self.weeks: period_parts = period.split(CUSTOM_YEAR_WEEK_AGG) translations_week = self.api_source_manager.get_company_week_from_period(week=period_parts[0],", "InterFaceReport: def __init__(self, topics: dict, surveys: dict, company_id: str, weeks:", "-> None: \"\"\" Create array with the dictionary for interface", ":return: \"\"\" for item_analyze in features: question = extract_question(self.info_file, dimension=item_analyze[0],", "() self.table_surveys_replies = [] self.table_topics = [] self.table_topic_comment = []", "as global_cons from utils.data_connection.api_data_manager import APISourcesFetcher from utils.utilities import read_json_file,", "item_analyze[1] sentiment = item_analyze[2] temp = {} temp.update(id=topic_id_comment_id) temp.update(comment=emoji.emojize(comment, use_aliases=True))", "in self.topics.items(): # heading topic_headings = nested_lookup(global_cons.TOPIC_CONTENT, topics) topic_headings_sentiments =", "from sentiment_analysis.src.word_cloud import words_clouds from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment from nested_lookup", "= [] for dimension in cons.dimensions: temp = [d for", "in features: topic_id_comment_id = item_analyze[0] comment = item_analyze[1] sentiment =", "# comments for topic_id, topic in topics.items(): topic_comments = nested_lookup(global_cons.TOPIC_COMMENT,", "= extract_dimension(self.info_file, dimension=item_analyze[0]) comment = item_analyze[1] sentiment = item_analyze[2] temp", "list(topics.keys()) topic_w_sentiments = list(zip(topic_ids, topic_headings, topic_headings_sentiments)) self.insert_to_list_topics(topic_w_sentiments) # comments for", "topic_headings, topic_headings_sentiments)) self.insert_to_list_topics(topic_w_sentiments) # comments for topic_id, topic in topics.items():", "__count_filter_keys(entities: list) -> object: \"\"\" Count and filter keys :param", "list(zip(topic_ids, topic_headings, topic_headings_sentiments)) self.insert_to_list_topics(topic_w_sentiments) # comments for topic_id, topic in", "= None self.image_base64_topics = None def sort_by_dimension_sentiment_table(self) -> None: \"\"\"", "for topic_id, topic in topics.items(): topic_comments = nested_lookup(global_cons.TOPIC_COMMENT, topic) topic_comments_scores", "temp_table.extend(temp) self.table_surveys_replies = temp_table def insert_to_list_surveys_replies(self, features: list, company_week: int)", "len(topic_comments) topic_w_scores = list(zip(topic_list_ids, topic_comments, 
topic_comments_scores)) self.insert_to_list_topic_comments(topic_w_scores) entities = nested_lookup(global_cons.TOPIC_ENTITIES,", "temp.update(sentiment=sentiment) self.table_surveys_replies.append(temp) self.sort_by_dimension_sentiment_table() def insert_to_list_topics(self, features: list) -> None: \"\"\"", "{} temp.update(id=topic_id_comment_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topic_comment.append(temp) self.table_topic_comment = sorted(self.table_topic_comment, key=lambda", ":param entities: list of entities text :return: \"\"\" entities =", "self.api_source_manager = api_source_manager self.thresholds = () self.table_surveys_replies = [] self.table_topics", "from nested_lookup import nested_lookup class InterFaceReport: def __init__(self, topics: dict,", "weeks: list, g_client: ClientsLanguageSentiment, api_source_manager: APISourcesFetcher): self.topics = topics self.surveys", "import ClientsLanguageSentiment from nested_lookup import nested_lookup class InterFaceReport: def __init__(self,", "dictionary for interface - referenced to topic comments :param features:", "\"\"\" for company_id, periods in self.surveys.items(): for period in self.weeks:", "= [] self.table_topics = [] self.table_topic_comment = [] self.counter_text_sr =", "interface - referenced to topic comments :param features: list of", "\"\"\" Create wordcloud of the main words :return: \"\"\" self.image_base64_sr", "by dimension and by sentiment :return: \"\"\" temp_table = []", "k: k['sentiment'], reverse=True) temp_table.extend(temp) self.table_surveys_replies = temp_table def insert_to_list_surveys_replies(self, features:", "topics) self.counter_text_topics = ClientsLanguageSentiment.count_entities(entities) def process_interface(self) -> None: \"\"\" Take", "import APISourcesFetcher from utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question from", "topic_ids = list(topics.keys()) topic_w_sentiments = list(zip(topic_ids, topic_headings, topic_headings_sentiments)) self.insert_to_list_topics(topic_w_sentiments) #", "\"\"\" for item_analyze in features: question = extract_question(self.info_file, dimension=item_analyze[0], week=company_week)", "topics :return: \"\"\" for company_id, topics in self.topics.items(): # heading", "self.table_surveys_replies = [] self.table_topics = [] self.table_topic_comment = [] self.counter_text_sr", "dimension = extract_dimension(self.info_file, dimension=item_analyze[0]) comment = item_analyze[1] sentiment = item_analyze[2]", "topic_id = item_analyze[0] comment = item_analyze[1] sentiment = item_analyze[2] temp", "= nested_lookup(global_cons.SR_DIMENSION, periods) sr_content = nested_lookup(global_cons.SR_CONTENT, periods) sr_sentiment = nested_lookup(global_cons.SENTIMENT,", "= nested_lookup(global_cons.TOPIC_SENTIMENT, topics) topic_ids = list(topics.keys()) topic_w_sentiments = list(zip(topic_ids, topic_headings,", "the dictionary for interface :param features: list of features to", "self.image_base64_topics = None def sort_by_dimension_sentiment_table(self) -> None: \"\"\" Sort by", "k['sentiment'], reverse=True) def word_cloud(self): \"\"\" Create wordcloud of the main", "in self.table_surveys_replies if d['dimension'] == dimension] temp = sorted(temp, key=lambda", "nested_lookup(global_cons.TOPIC_ENTITIES, topics) self.counter_text_topics = ClientsLanguageSentiment.count_entities(entities) def process_interface(self) -> None: \"\"\"", "= list(zip(topic_list_ids, 
topic_comments, topic_comments_scores)) self.insert_to_list_topic_comments(topic_w_scores) entities = nested_lookup(global_cons.TOPIC_ENTITIES, topics) self.counter_text_topics", "company_id self.weeks = weeks self.g_client = g_client self.api_source_manager = api_source_manager", "= sorted(self.table_topic_comment, key=lambda k: k['sentiment'], reverse=True) def word_cloud(self): \"\"\" Create", "translations_week = self.api_source_manager.get_company_week_from_period(week=period_parts[0], year=period_parts[1], company_id=self.company_id) sr_dimension = nested_lookup(global_cons.SR_DIMENSION, periods) sr_content", "sentiment :return: \"\"\" temp_table = [] for dimension in cons.dimensions:", "__process_sr(self) -> None: \"\"\" Process the surveys replies :return: \"\"\"", "sentiment = item_analyze[2] temp = {} temp.update(id=topic_id_comment_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment)", "headlines :param features: list of features to extract :return: \"\"\"", "words_clouds(self.counter_text_topics, cons.path_image_topics_wc) @staticmethod def __count_filter_keys(entities: list) -> object: \"\"\" Count", "week=company_week) dimension = extract_dimension(self.info_file, dimension=item_analyze[0]) comment = item_analyze[1] sentiment =", "features to extract :return: \"\"\" for item_analyze in features: topic_id_comment_id", "sorted(self.table_topics, key=lambda k: k['sentiment'], reverse=True) def insert_to_list_topic_comments(self, features: list) ->", "self.surveys.items(): for period in self.weeks: period_parts = period.split(CUSTOM_YEAR_WEEK_AGG) translations_week =", "period in self.weeks: period_parts = period.split(CUSTOM_YEAR_WEEK_AGG) translations_week = self.api_source_manager.get_company_week_from_period(week=period_parts[0], year=period_parts[1],", "- referenced to topic headlines :param features: list of features", "to extract :param company_week: company week of the company :return:", "extract_dimension(self.info_file, dimension=item_analyze[0]) comment = item_analyze[1] sentiment = item_analyze[2] temp =", "filter keys :param entities: list of entities text :return: \"\"\"", "keys :param entities: list of entities text :return: \"\"\" entities", "class InterFaceReport: def __init__(self, topics: dict, surveys: dict, company_id: str,", "self.thresholds = () self.table_surveys_replies = [] self.table_topics = [] self.table_topic_comment", "question = extract_question(self.info_file, dimension=item_analyze[0], week=company_week) dimension = extract_dimension(self.info_file, dimension=item_analyze[0]) comment", "APISourcesFetcher from utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question from sentiment_analysis.src.word_cloud", "= [] self.table_topic_comment = [] self.counter_text_sr = None self.counter_text_topics =", "= topics self.surveys = surveys self.company_id = company_id self.weeks =", ":return: \"\"\" entities = ClientsLanguageSentiment.count_entities(entities=entities) entities = ClientsLanguageSentiment.filter_black_list(entities=entities) return entities", "process_interface(self) -> None: \"\"\" Take the info needed to write", "item_analyze[1] sentiment = item_analyze[2] temp = {} temp.update(id=topic_id) temp.update(comment=emoji.emojize(comment, use_aliases=True))", "to topic comments :param features: list of features to extract", "None: \"\"\" Process the topics :return: \"\"\" for company_id, topics", "= None self.info_file = read_json_file(\"en_US.json\") 
self.image_base64_sr = None self.image_base64_topics =", "utils.data_connection.api_data_manager import APISourcesFetcher from utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question", "in cons.dimensions: temp = [d for d in self.table_surveys_replies if", "if d['dimension'] == dimension] temp = sorted(temp, key=lambda k: k['sentiment'],", "sort_by_dimension_sentiment_table(self) -> None: \"\"\" Sort by dimension and by sentiment", "self.counter_text_sr = None self.counter_text_topics = None self.info_file = read_json_file(\"en_US.json\") self.image_base64_sr", "temp = {} temp.update(id=topic_id_comment_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topic_comment.append(temp) self.table_topic_comment =", "temp = {} temp.update(dimension=dimension) temp.update(question=question) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_surveys_replies.append(temp) self.sort_by_dimension_sentiment_table()", "week of the company :return: \"\"\" for item_analyze in features:", "k['sentiment'], reverse=True) def insert_to_list_topic_comments(self, features: list) -> None: \"\"\" Create", ":param features: list of features to extract :param company_week: company", "comments for topic_id, topic in topics.items(): topic_comments = nested_lookup(global_cons.TOPIC_COMMENT, topic)", "Process the surveys replies :return: \"\"\" for company_id, periods in", "text :return: \"\"\" entities = ClientsLanguageSentiment.count_entities(entities=entities) entities = ClientsLanguageSentiment.filter_black_list(entities=entities) return", "import sentiment_analysis.src.report.cons_report as cons import sentiment_analysis.src.constants as global_cons from utils.data_connection.api_data_manager", ":param features: list of features to extract :return: \"\"\" for", "company_id: str, weeks: list, g_client: ClientsLanguageSentiment, api_source_manager: APISourcesFetcher): self.topics =", "topic_comments_scores = nested_lookup(global_cons.TOPIC_COMMENT_SENTIMENT, topic) topic_list_ids = [topic_id] * len(topic_comments) topic_w_scores", "from utils.data_connection.api_data_manager import APISourcesFetcher from utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension,", "= surveys self.company_id = company_id self.weeks = weeks self.g_client =", "sentiment_analysis.src.word_cloud import words_clouds from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment from nested_lookup import", "emoji import sentiment_analysis.src.report.cons_report as cons import sentiment_analysis.src.constants as global_cons from", "self.table_surveys_replies = temp_table def insert_to_list_surveys_replies(self, features: list, company_week: int) ->", "extract :param company_week: company week of the company :return: \"\"\"", "of the company :return: \"\"\" for item_analyze in features: question", "features: list, company_week: int) -> None: \"\"\" Create array with", "ClientsLanguageSentiment.filter_black_list(entities=entities) return entities def __process_sr(self) -> None: \"\"\" Process the", "= list(zip(sr_dimension, sr_content, sr_sentiment)) self.insert_to_list_surveys_replies(sr_comment_score, company_week=translations_week) self.counter_text_sr = self.__count_filter_keys(entities=sr_entities) def", "= nested_lookup(global_cons.TOPIC_COMMENT_SENTIMENT, topic) topic_list_ids = [topic_id] * len(topic_comments) topic_w_scores =", 
"sr_content, sr_sentiment)) self.insert_to_list_surveys_replies(sr_comment_score, company_week=translations_week) self.counter_text_sr = self.__count_filter_keys(entities=sr_entities) def __process_topics(self) ->", "= g_client self.api_source_manager = api_source_manager self.thresholds = () self.table_surveys_replies =", "reverse=True) temp_table.extend(temp) self.table_surveys_replies = temp_table def insert_to_list_surveys_replies(self, features: list, company_week:", "the topics :return: \"\"\" for company_id, topics in self.topics.items(): #", "company_week=translations_week) self.counter_text_sr = self.__count_filter_keys(entities=sr_entities) def __process_topics(self) -> None: \"\"\" Process", "features: question = extract_question(self.info_file, dimension=item_analyze[0], week=company_week) dimension = extract_dimension(self.info_file, dimension=item_analyze[0])", "list, g_client: ClientsLanguageSentiment, api_source_manager: APISourcesFetcher): self.topics = topics self.surveys =", "topics.items(): topic_comments = nested_lookup(global_cons.TOPIC_COMMENT, topic) topic_comments_scores = nested_lookup(global_cons.TOPIC_COMMENT_SENTIMENT, topic) topic_list_ids", "topics self.surveys = surveys self.company_id = company_id self.weeks = weeks", "array with the dictionary for interface - referenced to topic", "self.counter_text_topics = ClientsLanguageSentiment.count_entities(entities) def process_interface(self) -> None: \"\"\" Take the", "and by sentiment :return: \"\"\" temp_table = [] for dimension", "temp.update(question=question) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_surveys_replies.append(temp) self.sort_by_dimension_sentiment_table() def insert_to_list_topics(self, features: list)", "extract_dimension, extract_question from sentiment_analysis.src.word_cloud import words_clouds from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment", "def insert_to_list_topics(self, features: list) -> None: \"\"\" Create array with", "Take the info needed to write into report_pdf :return: \"\"\"", "for item_analyze in features: topic_id_comment_id = item_analyze[0] comment = item_analyze[1]", "for d in self.table_surveys_replies if d['dimension'] == dimension] temp =", "key=lambda k: k['sentiment'], reverse=True) def insert_to_list_topic_comments(self, features: list) -> None:", "features to extract :param company_week: company week of the company", "key=lambda k: k['sentiment'], reverse=True) def word_cloud(self): \"\"\" Create wordcloud of", "= item_analyze[1] sentiment = item_analyze[2] temp = {} temp.update(id=topic_id) temp.update(comment=emoji.emojize(comment,", "dictionary for interface - referenced to topic headlines :param features:", "year=period_parts[1], company_id=self.company_id) sr_dimension = nested_lookup(global_cons.SR_DIMENSION, periods) sr_content = nested_lookup(global_cons.SR_CONTENT, periods)", "Create array with the dictionary for interface - referenced to", "from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment from nested_lookup import nested_lookup class InterFaceReport:", "utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question from sentiment_analysis.src.word_cloud import words_clouds", "temp.update(sentiment=sentiment) self.table_topic_comment.append(temp) self.table_topic_comment = sorted(self.table_topic_comment, key=lambda k: k['sentiment'], reverse=True) def", "in 
self.weeks: period_parts = period.split(CUSTOM_YEAR_WEEK_AGG) translations_week = self.api_source_manager.get_company_week_from_period(week=period_parts[0], year=period_parts[1], company_id=self.company_id)", "item_analyze in features: topic_id = item_analyze[0] comment = item_analyze[1] sentiment", "self.counter_text_sr = self.__count_filter_keys(entities=sr_entities) def __process_topics(self) -> None: \"\"\" Process the", "of the main words :return: \"\"\" self.image_base64_sr = words_clouds(self.counter_text_sr, cons.path_image_sr_wc)", "sr_sentiment = nested_lookup(global_cons.SENTIMENT, periods) sr_entities = nested_lookup(global_cons.SR_ENTITIES, periods) sr_comment_score =", "= api_source_manager self.thresholds = () self.table_surveys_replies = [] self.table_topics =", "words_clouds from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment from nested_lookup import nested_lookup class", "= {} temp.update(id=topic_id_comment_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topic_comment.append(temp) self.table_topic_comment = sorted(self.table_topic_comment,", "\"\"\" Take the info needed to write into report_pdf :return:", "\"\"\" temp_table = [] for dimension in cons.dimensions: temp =", "for item_analyze in features: topic_id = item_analyze[0] comment = item_analyze[1]", "the main words :return: \"\"\" self.image_base64_sr = words_clouds(self.counter_text_sr, cons.path_image_sr_wc) self.image_base64_topics", "= [d for d in self.table_surveys_replies if d['dimension'] == dimension]", "= ClientsLanguageSentiment.filter_black_list(entities=entities) return entities def __process_sr(self) -> None: \"\"\" Process", "= nested_lookup(global_cons.SR_CONTENT, periods) sr_sentiment = nested_lookup(global_cons.SENTIMENT, periods) sr_entities = nested_lookup(global_cons.SR_ENTITIES,", "self.surveys = surveys self.company_id = company_id self.weeks = weeks self.g_client", "item_analyze[2] temp = {} temp.update(dimension=dimension) temp.update(question=question) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_surveys_replies.append(temp)", "self.weeks = weeks self.g_client = g_client self.api_source_manager = api_source_manager self.thresholds", "__process_topics(self) -> None: \"\"\" Process the topics :return: \"\"\" for", "periods in self.surveys.items(): for period in self.weeks: period_parts = period.split(CUSTOM_YEAR_WEEK_AGG)", "period_parts = period.split(CUSTOM_YEAR_WEEK_AGG) translations_week = self.api_source_manager.get_company_week_from_period(week=period_parts[0], year=period_parts[1], company_id=self.company_id) sr_dimension =", "as cons import sentiment_analysis.src.constants as global_cons from utils.data_connection.api_data_manager import APISourcesFetcher", "self.table_topics.append(temp) self.table_topics = sorted(self.table_topics, key=lambda k: k['sentiment'], reverse=True) def insert_to_list_topic_comments(self,", "\"\"\" Create array with the dictionary for interface :param features:", "for interface :param features: list of features to extract :param", "d['dimension'] == dimension] temp = sorted(temp, key=lambda k: k['sentiment'], reverse=True)", "\"\"\" Process the surveys replies :return: \"\"\" for company_id, periods", "nested_lookup(global_cons.SENTIMENT, periods) sr_entities = nested_lookup(global_cons.SR_ENTITIES, periods) sr_comment_score = list(zip(sr_dimension, sr_content,", "cons import 
sentiment_analysis.src.constants as global_cons from utils.data_connection.api_data_manager import APISourcesFetcher from", "nested_lookup(global_cons.TOPIC_COMMENT_SENTIMENT, topic) topic_list_ids = [topic_id] * len(topic_comments) topic_w_scores = list(zip(topic_list_ids,", "weeks self.g_client = g_client self.api_source_manager = api_source_manager self.thresholds = ()", "def __init__(self, topics: dict, surveys: dict, company_id: str, weeks: list,", "item_analyze in features: topic_id_comment_id = item_analyze[0] comment = item_analyze[1] sentiment", "topic_id_comment_id = item_analyze[0] comment = item_analyze[1] sentiment = item_analyze[2] temp", "to topic headlines :param features: list of features to extract", "[topic_id] * len(topic_comments) topic_w_scores = list(zip(topic_list_ids, topic_comments, topic_comments_scores)) self.insert_to_list_topic_comments(topic_w_scores) entities", "= () self.table_surveys_replies = [] self.table_topics = [] self.table_topic_comment =", "topic_comments, topic_comments_scores)) self.insert_to_list_topic_comments(topic_w_scores) entities = nested_lookup(global_cons.TOPIC_ENTITIES, topics) self.counter_text_topics = ClientsLanguageSentiment.count_entities(entities)", "word_cloud(self): \"\"\" Create wordcloud of the main words :return: \"\"\"", "= ClientsLanguageSentiment.count_entities(entities) def process_interface(self) -> None: \"\"\" Take the info", "def __process_sr(self) -> None: \"\"\" Process the surveys replies :return:", ":return: \"\"\" for item_analyze in features: topic_id = item_analyze[0] comment", "\"\"\" Process the topics :return: \"\"\" for company_id, topics in", "insert_to_list_surveys_replies(self, features: list, company_week: int) -> None: \"\"\" Create array" ]
[ "document_id self.doc_set_id = doc_set_id self.last_modified_time_key = last_modified_time_key self.last_modified_date_key = last_modified_date_key", "machime' class DimProcess: def __init__( self, *kwargs, process_key: int, module:", "last_modified_date_key: int, user_name: str = None, process_key: int, field_name: str,", "int, field_name: str, field_value: str = None, last_modified_timestamp: str ):", "self.project_id = project_id self.document_id = document_id self.doc_set_id = doc_set_id self.last_modified_time_key", "'apr_qc', 'keyer_input'] def example_data(self): data = { 'process_key': 1, 'resource':", "str, last_modified_time_key: int, last_modified_date_key: int, user_name: str = None, process_key:", "'transform', 'sub_step': None, } class FactDataExtractionModel: def __init__( self, *kwargs,", "self.document_id = document_id self.doc_set_id = doc_set_id self.last_modified_time_key = last_modified_time_key self.last_modified_date_key", "str, field_value: str = None, last_modified_timestamp: str ): self.project_id =", "str, document_id: str, doc_set_id: str, last_modified_time_key: int, last_modified_date_key: int, user_name:", "last_modified_date_key self.user_name = user_name self.process_key = process_key self.field_name = field_name", "'sub_step': None, 'process_key': 2, 'resource': 'machine', 'module': 'keyed_data', 'step': 'transform',", "step: str, sub_step: str, resource: str = 'human', ): def", "__init__( self, *kwargs, process_key: int, module: str, type: str, step:", "def step(self): return ['qc', 'auto_qc', 'apr_qc', 'keyer_input'] def example_data(self): data", "int, user_name: str = None, process_key: int, field_name: str, field_value:", "'human', ): def step(self): return ['qc', 'auto_qc', 'apr_qc', 'keyer_input'] def", "doc_set_id self.last_modified_time_key = last_modified_time_key self.last_modified_date_key = last_modified_date_key self.user_name = user_name", "str, step: str, sub_step: str, resource: str = 'human', ):", "class FactDataExtractionModel: def __init__( self, *kwargs, project_id: str, document_id: str,", "type: str, step: str, sub_step: str, resource: str = 'human',", "): self.project_id = project_id self.document_id = document_id self.doc_set_id = doc_set_id", "project_id self.document_id = document_id self.doc_set_id = doc_set_id self.last_modified_time_key = last_modified_time_key", "str, type: str, step: str, sub_step: str, resource: str =", "sub_step: str, resource: str = 'human', ): def step(self): return", "= document_id self.doc_set_id = doc_set_id self.last_modified_time_key = last_modified_time_key self.last_modified_date_key =", "= 'human', ): def step(self): return ['qc', 'auto_qc', 'apr_qc', 'keyer_input']", "self.process_key = process_key self.field_name = field_name self.field_value = field_value self.last_modified_timestamp", "process_key self.field_name = field_name self.field_value = field_value self.last_modified_timestamp = last_modified_timestamp", "2, 'resource': 'machine', 'module': 'keyed_data', 'step': 'transform', 'sub_step': None, }", "step(self): return ['qc', 'auto_qc', 'apr_qc', 'keyer_input'] def example_data(self): data =", "'resource': 'human', 'module': 'keyed_data', 'step': 'qc', 'sub_step': None, 'process_key': 2,", "str, doc_set_id: str, last_modified_time_key: int, last_modified_date_key: int, user_name: str =", "None, last_modified_timestamp: str ): self.project_id = project_id self.document_id = document_id", "= doc_set_id self.last_modified_time_key = last_modified_time_key self.last_modified_date_key = 
last_modified_date_key self.user_name =", "last_modified_time_key: int, last_modified_date_key: int, user_name: str = None, process_key: int,", "= None, last_modified_timestamp: str ): self.project_id = project_id self.document_id =", "'machine', 'module': 'keyed_data', 'step': 'transform', 'sub_step': None, } class FactDataExtractionModel:", "FactDataExtractionModel: def __init__( self, *kwargs, project_id: str, document_id: str, doc_set_id:", "int, last_modified_date_key: int, user_name: str = None, process_key: int, field_name:", "str, resource: str = 'human', ): def step(self): return ['qc',", "'resource': 'machine', 'module': 'keyed_data', 'step': 'transform', 'sub_step': None, } class", "return ['qc', 'auto_qc', 'apr_qc', 'keyer_input'] def example_data(self): data = {", "'process_key': 1, 'resource': 'human', 'module': 'keyed_data', 'step': 'qc', 'sub_step': None,", "self, *kwargs, process_key: int, module: str, type: str, step: str,", "last_modified_timestamp: str ): self.project_id = project_id self.document_id = document_id self.doc_set_id", "self.last_modified_time_key = last_modified_time_key self.last_modified_date_key = last_modified_date_key self.user_name = user_name self.process_key", "} class FactDataExtractionModel: def __init__( self, *kwargs, project_id: str, document_id:", "process_key: int, field_name: str, field_value: str = None, last_modified_timestamp: str", "1, 'resource': 'human', 'module': 'keyed_data', 'step': 'qc', 'sub_step': None, 'process_key':", "data = { 'process_key': 1, 'resource': 'human', 'module': 'keyed_data', 'step':", "str, sub_step: str, resource: str = 'human', ): def step(self):", "'step': 'transform', 'sub_step': None, } class FactDataExtractionModel: def __init__( self,", "def __init__( self, *kwargs, process_key: int, module: str, type: str,", "= user_name self.process_key = process_key self.field_name = field_name self.field_value =", "process_key: int, module: str, type: str, step: str, sub_step: str,", "ad machime' class DimProcess: def __init__( self, *kwargs, process_key: int,", "['qc', 'auto_qc', 'apr_qc', 'keyer_input'] def example_data(self): data = { 'process_key':", "DimProcess: def __init__( self, *kwargs, process_key: int, module: str, type:", "'keyer_input'] def example_data(self): data = { 'process_key': 1, 'resource': 'human',", "= last_modified_date_key self.user_name = user_name self.process_key = process_key self.field_name =", "None, } class FactDataExtractionModel: def __init__( self, *kwargs, project_id: str,", "= None, process_key: int, field_name: str, field_value: str = None,", "= project_id self.document_id = document_id self.doc_set_id = doc_set_id self.last_modified_time_key =", "__init__( self, *kwargs, project_id: str, document_id: str, doc_set_id: str, last_modified_time_key:", "'module': 'keyed_data', 'step': 'qc', 'sub_step': None, 'process_key': 2, 'resource': 'machine',", "self.doc_set_id = doc_set_id self.last_modified_time_key = last_modified_time_key self.last_modified_date_key = last_modified_date_key self.user_name", "str = None, last_modified_timestamp: str ): self.project_id = project_id self.document_id", "None, process_key: int, field_name: str, field_value: str = None, last_modified_timestamp:", "'process_key': 2, 'resource': 'machine', 'module': 'keyed_data', 'step': 'transform', 'sub_step': None,", "resource ='human ad machime' class DimProcess: def __init__( self, *kwargs,", "example_data(self): data = { 'process_key': 1, 'resource': 'human', 'module': 'keyed_data',", "= last_modified_time_key 
self.last_modified_date_key = last_modified_date_key self.user_name = user_name self.process_key =", "self.last_modified_date_key = last_modified_date_key self.user_name = user_name self.process_key = process_key self.field_name", "field_value: str = None, last_modified_timestamp: str ): self.project_id = project_id", "str ): self.project_id = project_id self.document_id = document_id self.doc_set_id =", "project_id: str, document_id: str, doc_set_id: str, last_modified_time_key: int, last_modified_date_key: int,", "self, *kwargs, project_id: str, document_id: str, doc_set_id: str, last_modified_time_key: int,", "= process_key self.field_name = field_name self.field_value = field_value self.last_modified_timestamp =", "{ 'process_key': 1, 'resource': 'human', 'module': 'keyed_data', 'step': 'qc', 'sub_step':", "module: str, type: str, step: str, sub_step: str, resource: str", "str = 'human', ): def step(self): return ['qc', 'auto_qc', 'apr_qc',", "user_name: str = None, process_key: int, field_name: str, field_value: str", "'keyed_data', 'step': 'transform', 'sub_step': None, } class FactDataExtractionModel: def __init__(", "='human ad machime' class DimProcess: def __init__( self, *kwargs, process_key:", "): def step(self): return ['qc', 'auto_qc', 'apr_qc', 'keyer_input'] def example_data(self):", "def example_data(self): data = { 'process_key': 1, 'resource': 'human', 'module':", "last_modified_time_key self.last_modified_date_key = last_modified_date_key self.user_name = user_name self.process_key = process_key", "document_id: str, doc_set_id: str, last_modified_time_key: int, last_modified_date_key: int, user_name: str", "field_name: str, field_value: str = None, last_modified_timestamp: str ): self.project_id", "*kwargs, project_id: str, document_id: str, doc_set_id: str, last_modified_time_key: int, last_modified_date_key:", "'module': 'keyed_data', 'step': 'transform', 'sub_step': None, } class FactDataExtractionModel: def", "'keyed_data', 'step': 'qc', 'sub_step': None, 'process_key': 2, 'resource': 'machine', 'module':", "int, module: str, type: str, step: str, sub_step: str, resource:", "resource: str = 'human', ): def step(self): return ['qc', 'auto_qc',", "'human', 'module': 'keyed_data', 'step': 'qc', 'sub_step': None, 'process_key': 2, 'resource':", "None, 'process_key': 2, 'resource': 'machine', 'module': 'keyed_data', 'step': 'transform', 'sub_step':", "doc_set_id: str, last_modified_time_key: int, last_modified_date_key: int, user_name: str = None,", "str = None, process_key: int, field_name: str, field_value: str =", "self.user_name = user_name self.process_key = process_key self.field_name = field_name self.field_value", "= { 'process_key': 1, 'resource': 'human', 'module': 'keyed_data', 'step': 'qc',", "user_name self.process_key = process_key self.field_name = field_name self.field_value = field_value", "'step': 'qc', 'sub_step': None, 'process_key': 2, 'resource': 'machine', 'module': 'keyed_data',", "'qc', 'sub_step': None, 'process_key': 2, 'resource': 'machine', 'module': 'keyed_data', 'step':", "'sub_step': None, } class FactDataExtractionModel: def __init__( self, *kwargs, project_id:", "class DimProcess: def __init__( self, *kwargs, process_key: int, module: str,", "def __init__( self, *kwargs, project_id: str, document_id: str, doc_set_id: str,", "*kwargs, process_key: int, module: str, type: str, step: str, sub_step:", "'auto_qc', 'apr_qc', 'keyer_input'] def example_data(self): data = { 'process_key': 1," ]
[ "if verbose_name is not None: dd.update_field(cls, 'owner', verbose_name=verbose_name) kwargs.update( verbose_name=format_lazy(u\"{}", "_('(type)'))) dd.update_field(cls, 'owner_type', **kwargs) def update_owned_instance(self, controllable): if self.owner: self.owner.update_owned_instance(controllable)", "Translators: will also be concatenated with '(type)' '(object)' owner_label =", "GenericForeignKeyIdField class Controllable(dd.Model): # Translators: will also be concatenated with", "BSD (see file COPYING for details) from builtins import object", "'owner_id', **kwargs) if verbose_name is not None: kwargs.update( verbose_name=format_lazy(u\"{} {}\",", "is not None: kwargs.update( verbose_name=format_lazy(u\"{} {}\", verbose_name, _('(type)'))) dd.update_field(cls, 'owner_type',", "controllable): if self.owner: self.owner.update_owned_instance(controllable) super(Controllable, self).update_owned_instance(controllable) def save(self, *args, **kw):", "super(Controllable, self).save(*args, **kw) if self.owner: self.owner.after_update_owned_instance(self) def controlled_rows(self, model, **kwargs):", "class Controllable(dd.Model): # Translators: will also be concatenated with '(type)'", "owner_type, editable=True, blank=True, null=True, verbose_name=format_lazy(u\"{} {}\", owner_label, _('(object)'))) owner =", "= GenericForeignKey( 'owner_type', 'owner_id', verbose_name=owner_label) @classmethod def update_controller_field(cls, verbose_name=None, **kwargs):", "verbose_name=verbose_name) kwargs.update( verbose_name=format_lazy(u\"{} {}\", verbose_name, _('(object)'))) dd.update_field(cls, 'owner_id', **kwargs) if", "def update_controller_field(cls, verbose_name=None, **kwargs): if verbose_name is not None: dd.update_field(cls,", "**kwargs) if verbose_name is not None: kwargs.update( verbose_name=format_lazy(u\"{} {}\", verbose_name,", "**kw): if settings.SITE.loading_from_dump: super(Controllable, self).save(*args, **kw) else: if self.owner: self.owner.update_owned_instance(self)", "model, **kwargs): gfk = self._meta.get_field('owner') kwargs = gfk2lookup(gfk, self, **kwargs)", "update_owned_instance(self, controllable): if self.owner: self.owner.update_owned_instance(controllable) super(Controllable, self).update_owned_instance(controllable) def save(self, *args,", "2010-2018 Rumma & Ko Ltd # License: BSD (see file", "owner_label, _('(object)'))) owner = GenericForeignKey( 'owner_type', 'owner_id', verbose_name=owner_label) @classmethod def", "from django.utils.text import format_lazy from lino.api import dd from lino.core.gfks", "kwargs.update( verbose_name=format_lazy(u\"{} {}\", verbose_name, _('(object)'))) dd.update_field(cls, 'owner_id', **kwargs) if verbose_name", "is not None: dd.update_field(cls, 'owner', verbose_name=verbose_name) kwargs.update( verbose_name=format_lazy(u\"{} {}\", verbose_name,", "gfk2lookup from .fields import GenericForeignKey, GenericForeignKeyIdField class Controllable(dd.Model): # Translators:", "controlled_rows(self, model, **kwargs): gfk = self._meta.get_field('owner') kwargs = gfk2lookup(gfk, self,", "verbose_name=format_lazy(u\"{} {}\", verbose_name, _('(object)'))) dd.update_field(cls, 'owner_id', **kwargs) if verbose_name is", "ugettext_lazy as _ from django.utils.text import format_lazy from lino.api import", "'owner_id', verbose_name=owner_label) @classmethod def update_controller_field(cls, verbose_name=None, **kwargs): if verbose_name is", "lino.core.gfks import gfk2lookup from .fields import GenericForeignKey, GenericForeignKeyIdField class 
Controllable(dd.Model):", "(see file COPYING for details) from builtins import object from", "owner_label, _('(type)'))) owner_id = GenericForeignKeyIdField( owner_type, editable=True, blank=True, null=True, verbose_name=format_lazy(u\"{}", "dd.update_field(cls, 'owner_id', **kwargs) if verbose_name is not None: kwargs.update( verbose_name=format_lazy(u\"{}", "if settings.SITE.loading_from_dump: super(Controllable, self).save(*args, **kw) else: if self.owner: self.owner.update_owned_instance(self) super(Controllable,", "import gfk2lookup from .fields import GenericForeignKey, GenericForeignKeyIdField class Controllable(dd.Model): #", "settings from django.utils.translation import ugettext_lazy as _ from django.utils.text import", "& Ko Ltd # License: BSD (see file COPYING for", "# License: BSD (see file COPYING for details) from builtins", "verbose_name is not None: dd.update_field(cls, 'owner', verbose_name=verbose_name) kwargs.update( verbose_name=format_lazy(u\"{} {}\",", "from django.utils.translation import ugettext_lazy as _ from django.utils.text import format_lazy", "format_lazy from lino.api import dd from lino.core.gfks import gfk2lookup from", "from django.conf import settings from django.utils.translation import ugettext_lazy as _", "GenericForeignKeyIdField( owner_type, editable=True, blank=True, null=True, verbose_name=format_lazy(u\"{} {}\", owner_label, _('(object)'))) owner", "by') controller_is_optional = True class Meta(object): abstract = True owner_type", "from builtins import object from django.contrib.contenttypes.models import * from django.conf", "self.owner.update_owned_instance(controllable) super(Controllable, self).update_owned_instance(controllable) def save(self, *args, **kw): if settings.SITE.loading_from_dump: super(Controllable,", "will also be concatenated with '(type)' '(object)' owner_label = _('Controlled", "self.owner.update_owned_instance(self) super(Controllable, self).save(*args, **kw) if self.owner: self.owner.after_update_owned_instance(self) def controlled_rows(self, model,", "editable=True, blank=True, null=True, verbose_name=format_lazy(u\"{} {}\", owner_label, _('(type)'))) owner_id = GenericForeignKeyIdField(", "= GenericForeignKeyIdField( owner_type, editable=True, blank=True, null=True, verbose_name=format_lazy(u\"{} {}\", owner_label, _('(object)')))", "*args, **kw): if settings.SITE.loading_from_dump: super(Controllable, self).save(*args, **kw) else: if self.owner:", "**kwargs) def update_owned_instance(self, controllable): if self.owner: self.owner.update_owned_instance(controllable) super(Controllable, self).update_owned_instance(controllable) def", "def controlled_rows(self, model, **kwargs): gfk = self._meta.get_field('owner') kwargs = gfk2lookup(gfk,", "blank=True, null=True, verbose_name=format_lazy(u\"{} {}\", owner_label, _('(object)'))) owner = GenericForeignKey( 'owner_type',", "COPYING for details) from builtins import object from django.contrib.contenttypes.models import", "import dd from lino.core.gfks import gfk2lookup from .fields import GenericForeignKey,", "blank=True, null=True, verbose_name=format_lazy(u\"{} {}\", owner_label, _('(type)'))) owner_id = GenericForeignKeyIdField( owner_type,", "'owner', verbose_name=verbose_name) kwargs.update( verbose_name=format_lazy(u\"{} {}\", verbose_name, _('(object)'))) dd.update_field(cls, 'owner_id', **kwargs)", "abstract = True owner_type = dd.ForeignKey( ContentType, editable=True, blank=True, null=True,", "dd from lino.core.gfks import gfk2lookup from .fields import 
GenericForeignKey, GenericForeignKeyIdField", "from .fields import GenericForeignKey, GenericForeignKeyIdField class Controllable(dd.Model): # Translators: will", "save(self, *args, **kw): if settings.SITE.loading_from_dump: super(Controllable, self).save(*args, **kw) else: if", "GenericForeignKey( 'owner_type', 'owner_id', verbose_name=owner_label) @classmethod def update_controller_field(cls, verbose_name=None, **kwargs): if", "_('(object)'))) dd.update_field(cls, 'owner_id', **kwargs) if verbose_name is not None: kwargs.update(", "UTF-8 -*- # Copyright 2010-2018 Rumma & Ko Ltd #", "'owner_type', 'owner_id', verbose_name=owner_label) @classmethod def update_controller_field(cls, verbose_name=None, **kwargs): if verbose_name", "None: dd.update_field(cls, 'owner', verbose_name=verbose_name) kwargs.update( verbose_name=format_lazy(u\"{} {}\", verbose_name, _('(object)'))) dd.update_field(cls,", "as _ from django.utils.text import format_lazy from lino.api import dd", "= True class Meta(object): abstract = True owner_type = dd.ForeignKey(", "from django.contrib.contenttypes.models import * from django.conf import settings from django.utils.translation", "django.conf import settings from django.utils.translation import ugettext_lazy as _ from", "{}\", owner_label, _('(object)'))) owner = GenericForeignKey( 'owner_type', 'owner_id', verbose_name=owner_label) @classmethod", "{}\", owner_label, _('(type)'))) owner_id = GenericForeignKeyIdField( owner_type, editable=True, blank=True, null=True,", "True owner_type = dd.ForeignKey( ContentType, editable=True, blank=True, null=True, verbose_name=format_lazy(u\"{} {}\",", "owner = GenericForeignKey( 'owner_type', 'owner_id', verbose_name=owner_label) @classmethod def update_controller_field(cls, verbose_name=None,", "= _('Controlled by') controller_is_optional = True class Meta(object): abstract =", "Copyright 2010-2018 Rumma & Ko Ltd # License: BSD (see", "= dd.ForeignKey( ContentType, editable=True, blank=True, null=True, verbose_name=format_lazy(u\"{} {}\", owner_label, _('(type)')))", "self).save(*args, **kw) if self.owner: self.owner.after_update_owned_instance(self) def controlled_rows(self, model, **kwargs): gfk", "editable=True, blank=True, null=True, verbose_name=format_lazy(u\"{} {}\", owner_label, _('(object)'))) owner = GenericForeignKey(", "controller_is_optional = True class Meta(object): abstract = True owner_type =", "django.utils.text import format_lazy from lino.api import dd from lino.core.gfks import", "GenericForeignKey, GenericForeignKeyIdField class Controllable(dd.Model): # Translators: will also be concatenated", "object from django.contrib.contenttypes.models import * from django.conf import settings from", "class Meta(object): abstract = True owner_type = dd.ForeignKey( ContentType, editable=True,", "# -*- coding: UTF-8 -*- # Copyright 2010-2018 Rumma &", "# Translators: will also be concatenated with '(type)' '(object)' owner_label", "null=True, verbose_name=format_lazy(u\"{} {}\", owner_label, _('(type)'))) owner_id = GenericForeignKeyIdField( owner_type, editable=True,", "True class Meta(object): abstract = True owner_type = dd.ForeignKey( ContentType,", "owner_label = _('Controlled by') controller_is_optional = True class Meta(object): abstract", "_('(object)'))) owner = GenericForeignKey( 'owner_type', 'owner_id', verbose_name=owner_label) @classmethod def update_controller_field(cls,", "from lino.api import dd from lino.core.gfks import gfk2lookup from .fields", "verbose_name=format_lazy(u\"{} {}\", owner_label, 
_('(object)'))) owner = GenericForeignKey( 'owner_type', 'owner_id', verbose_name=owner_label)", "if self.owner: self.owner.update_owned_instance(controllable) super(Controllable, self).update_owned_instance(controllable) def save(self, *args, **kw): if", "with '(type)' '(object)' owner_label = _('Controlled by') controller_is_optional = True", "self.owner: self.owner.update_owned_instance(controllable) super(Controllable, self).update_owned_instance(controllable) def save(self, *args, **kw): if settings.SITE.loading_from_dump:", "Controllable(dd.Model): # Translators: will also be concatenated with '(type)' '(object)'", "super(Controllable, self).save(*args, **kw) else: if self.owner: self.owner.update_owned_instance(self) super(Controllable, self).save(*args, **kw)", "lino.api import dd from lino.core.gfks import gfk2lookup from .fields import", "Meta(object): abstract = True owner_type = dd.ForeignKey( ContentType, editable=True, blank=True,", "verbose_name=format_lazy(u\"{} {}\", verbose_name, _('(type)'))) dd.update_field(cls, 'owner_type', **kwargs) def update_owned_instance(self, controllable):", "# Copyright 2010-2018 Rumma & Ko Ltd # License: BSD", "settings.SITE.loading_from_dump: super(Controllable, self).save(*args, **kw) else: if self.owner: self.owner.update_owned_instance(self) super(Controllable, self).save(*args,", "'(object)' owner_label = _('Controlled by') controller_is_optional = True class Meta(object):", "**kwargs): if verbose_name is not None: dd.update_field(cls, 'owner', verbose_name=verbose_name) kwargs.update(", "Ltd # License: BSD (see file COPYING for details) from", "self.owner: self.owner.update_owned_instance(self) super(Controllable, self).save(*args, **kw) if self.owner: self.owner.after_update_owned_instance(self) def controlled_rows(self,", "coding: UTF-8 -*- # Copyright 2010-2018 Rumma & Ko Ltd", "file COPYING for details) from builtins import object from django.contrib.contenttypes.models", "import format_lazy from lino.api import dd from lino.core.gfks import gfk2lookup", "import * from django.conf import settings from django.utils.translation import ugettext_lazy", "'owner_type', **kwargs) def update_owned_instance(self, controllable): if self.owner: self.owner.update_owned_instance(controllable) super(Controllable, self).update_owned_instance(controllable)", "def save(self, *args, **kw): if settings.SITE.loading_from_dump: super(Controllable, self).save(*args, **kw) else:", "@classmethod def update_controller_field(cls, verbose_name=None, **kwargs): if verbose_name is not None:", "_ from django.utils.text import format_lazy from lino.api import dd from", "self.owner: self.owner.after_update_owned_instance(self) def controlled_rows(self, model, **kwargs): gfk = self._meta.get_field('owner') kwargs", "_('(type)'))) owner_id = GenericForeignKeyIdField( owner_type, editable=True, blank=True, null=True, verbose_name=format_lazy(u\"{} {}\",", "import ugettext_lazy as _ from django.utils.text import format_lazy from lino.api", "verbose_name=owner_label) @classmethod def update_controller_field(cls, verbose_name=None, **kwargs): if verbose_name is not", "details) from builtins import object from django.contrib.contenttypes.models import * from", "owner_id = GenericForeignKeyIdField( owner_type, editable=True, blank=True, null=True, verbose_name=format_lazy(u\"{} {}\", owner_label,", "null=True, verbose_name=format_lazy(u\"{} {}\", owner_label, _('(object)'))) owner = GenericForeignKey( 'owner_type', 'owner_id',", "owner_type = dd.ForeignKey( ContentType, 
editable=True, blank=True, null=True, verbose_name=format_lazy(u\"{} {}\", owner_label,", "-*- # Copyright 2010-2018 Rumma & Ko Ltd # License:", "**kw) else: if self.owner: self.owner.update_owned_instance(self) super(Controllable, self).save(*args, **kw) if self.owner:", "'(type)' '(object)' owner_label = _('Controlled by') controller_is_optional = True class", "verbose_name is not None: kwargs.update( verbose_name=format_lazy(u\"{} {}\", verbose_name, _('(type)'))) dd.update_field(cls,", "* from django.conf import settings from django.utils.translation import ugettext_lazy as", "import settings from django.utils.translation import ugettext_lazy as _ from django.utils.text", "{}\", verbose_name, _('(type)'))) dd.update_field(cls, 'owner_type', **kwargs) def update_owned_instance(self, controllable): if", "django.contrib.contenttypes.models import * from django.conf import settings from django.utils.translation import", "verbose_name, _('(type)'))) dd.update_field(cls, 'owner_type', **kwargs) def update_owned_instance(self, controllable): if self.owner:", "import object from django.contrib.contenttypes.models import * from django.conf import settings", "for details) from builtins import object from django.contrib.contenttypes.models import *", "not None: dd.update_field(cls, 'owner', verbose_name=verbose_name) kwargs.update( verbose_name=format_lazy(u\"{} {}\", verbose_name, _('(object)')))", "kwargs.update( verbose_name=format_lazy(u\"{} {}\", verbose_name, _('(type)'))) dd.update_field(cls, 'owner_type', **kwargs) def update_owned_instance(self,", "gfk = self._meta.get_field('owner') kwargs = gfk2lookup(gfk, self, **kwargs) return model.objects.filter(**kwargs)", "also be concatenated with '(type)' '(object)' owner_label = _('Controlled by')", "verbose_name=None, **kwargs): if verbose_name is not None: dd.update_field(cls, 'owner', verbose_name=verbose_name)", "= True owner_type = dd.ForeignKey( ContentType, editable=True, blank=True, null=True, verbose_name=format_lazy(u\"{}", "if verbose_name is not None: kwargs.update( verbose_name=format_lazy(u\"{} {}\", verbose_name, _('(type)')))", "Rumma & Ko Ltd # License: BSD (see file COPYING", "update_controller_field(cls, verbose_name=None, **kwargs): if verbose_name is not None: dd.update_field(cls, 'owner',", "License: BSD (see file COPYING for details) from builtins import", "import GenericForeignKey, GenericForeignKeyIdField class Controllable(dd.Model): # Translators: will also be", "self).update_owned_instance(controllable) def save(self, *args, **kw): if settings.SITE.loading_from_dump: super(Controllable, self).save(*args, **kw)", "Ko Ltd # License: BSD (see file COPYING for details)", "def update_owned_instance(self, controllable): if self.owner: self.owner.update_owned_instance(controllable) super(Controllable, self).update_owned_instance(controllable) def save(self,", "**kw) if self.owner: self.owner.after_update_owned_instance(self) def controlled_rows(self, model, **kwargs): gfk =", "django.utils.translation import ugettext_lazy as _ from django.utils.text import format_lazy from", "self).save(*args, **kw) else: if self.owner: self.owner.update_owned_instance(self) super(Controllable, self).save(*args, **kw) if", "_('Controlled by') controller_is_optional = True class Meta(object): abstract = True", "verbose_name, _('(object)'))) dd.update_field(cls, 'owner_id', **kwargs) if verbose_name is not None:", "dd.update_field(cls, 'owner_type', **kwargs) def update_owned_instance(self, controllable): if self.owner: 
self.owner.update_owned_instance(controllable) super(Controllable,", "if self.owner: self.owner.update_owned_instance(self) super(Controllable, self).save(*args, **kw) if self.owner: self.owner.after_update_owned_instance(self) def", "**kwargs): gfk = self._meta.get_field('owner') kwargs = gfk2lookup(gfk, self, **kwargs) return", "dd.ForeignKey( ContentType, editable=True, blank=True, null=True, verbose_name=format_lazy(u\"{} {}\", owner_label, _('(type)'))) owner_id", "verbose_name=format_lazy(u\"{} {}\", owner_label, _('(type)'))) owner_id = GenericForeignKeyIdField( owner_type, editable=True, blank=True,", "else: if self.owner: self.owner.update_owned_instance(self) super(Controllable, self).save(*args, **kw) if self.owner: self.owner.after_update_owned_instance(self)", "None: kwargs.update( verbose_name=format_lazy(u\"{} {}\", verbose_name, _('(type)'))) dd.update_field(cls, 'owner_type', **kwargs) def", "{}\", verbose_name, _('(object)'))) dd.update_field(cls, 'owner_id', **kwargs) if verbose_name is not", "from lino.core.gfks import gfk2lookup from .fields import GenericForeignKey, GenericForeignKeyIdField class", "if self.owner: self.owner.after_update_owned_instance(self) def controlled_rows(self, model, **kwargs): gfk = self._meta.get_field('owner')", "-*- coding: UTF-8 -*- # Copyright 2010-2018 Rumma & Ko", "dd.update_field(cls, 'owner', verbose_name=verbose_name) kwargs.update( verbose_name=format_lazy(u\"{} {}\", verbose_name, _('(object)'))) dd.update_field(cls, 'owner_id',", "be concatenated with '(type)' '(object)' owner_label = _('Controlled by') controller_is_optional", "concatenated with '(type)' '(object)' owner_label = _('Controlled by') controller_is_optional =", "ContentType, editable=True, blank=True, null=True, verbose_name=format_lazy(u\"{} {}\", owner_label, _('(type)'))) owner_id =", "not None: kwargs.update( verbose_name=format_lazy(u\"{} {}\", verbose_name, _('(type)'))) dd.update_field(cls, 'owner_type', **kwargs)", "super(Controllable, self).update_owned_instance(controllable) def save(self, *args, **kw): if settings.SITE.loading_from_dump: super(Controllable, self).save(*args,", ".fields import GenericForeignKey, GenericForeignKeyIdField class Controllable(dd.Model): # Translators: will also", "self.owner.after_update_owned_instance(self) def controlled_rows(self, model, **kwargs): gfk = self._meta.get_field('owner') kwargs =", "builtins import object from django.contrib.contenttypes.models import * from django.conf import" ]
[ "''' TERS İSE TEKRAR BOLGELERİ BUL ''' if ret==True: warp2_gri=cv2.cvtColor(warp2,cv2.COLOR_BGR2GRAY)", "imutils import contours from imutils.perspective import four_point_transform import imutils import", "pts2=np.array([(300,50),(600,50),(302,1545),(602,1545)]) pts3=np.array([(600,50),(900,50),(602,1545),(902,1545)]) pts4=np.array([(900,50),(1200,50),(902,1545),(1202,1545)]) col1=four_point_transform(cevap,pts1) col2=four_point_transform(cevap,pts2) col3=four_point_transform(cevap,pts3) col4=four_point_transform(cevap,pts4) return col1,col2,col3,col4 def", "if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) if cevap[0]>500: yanit+=str(cevap[1])", "kagit=imutils.rotate(kagit,angle=180) print(\"Kağıdı ters koymuşsunuz,çevrildi\") ret=True return ret,kagit else: return ret,kagit", "col1_gri,col2_gri,col3_gri,col4_gri def cevap_contour(col1,col2,col3,col4): col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4) col1_coord=cevap_contour_bul(col1,col1_gri) col2_coord=cevap_contour_bul(col2,col1_gri) col3_coord=cevap_contour_bul(col3,col1_gri) col4_coord=cevap_contour_bul(col4,col1_gri) return col1_coord,col2_coord,col3_coord,col4_coord", "in array: if koordinat==c[0] or abs(koordinat-c[0])<15: return True #Tekrar var", "imutils import cv2 import matplotlib.pyplot as plt import numpy as", "M=cv2.moments(box) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) res=tekrar_bul(x_coords,x) if res is False and abs(x_coords[-1][1]-y)<35:", "contour_cizdir(ogrno_bos_gri,ogrno_coord,\"ogrenci numarası\") #v2.imshow(\"ogrno\",imutils.resize(ogrno_bos,height=400)) ''' DIVIDE ANSWER PART INTO 4 SLICES", "soru_tur=sorugrup_islemleri(sorugrup_dolu,sorugrup_dolu_gri,soru_cont) print(\"Soru Grubu\",soru_tur) thresh_dolu=cv2.threshold(isim_dolu_gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1] isim_str=isim_islemleri(isim_dolu_gri,coord_isim,thresh_dolu) print(isim_str) sinav_dolu=four_point_transform(warp2,bolgeler2['sinav_turu']) sinav_dolu_gri=cv2.cvtColor(sinav_dolu,cv2.COLOR_BGR2GRAY) sinav_turu=sinav_islemleri(sinav_dolu,sinav_dolu_gri,sinav_coord) print(\"Sınav", "coord_isim, thres=contour_bul(isim_bos, isim_bos_gri) #contour_cizdir(isim_bos,coord,\"isim_bos\") #cevap_islemleri(cevap_bos_gri,coord) ############################################## resim=cv2.imread(dolu_kagit) resim_gri=cv2.cvtColor(resim,cv2.COLOR_BGR2GRAY) warp2,warp2_gri=kagit_bul(resim,resim_gri) bolgeler2,areas2=bolge_bul(warp2,warp2_gri)", "warp=four_point_transform(image,approx.reshape(4,2)) warp_gri=four_point_transform(gray,approx.reshape(4,2)) return warp,warp_gri def soru_grup_contour_bul(resim,gri): thr2=cv2.threshold(gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1] can=cv2.Canny(thr2,50,100) can=cv2.dilate(can,None,iterations=3) coords=[]", "cont: approx=cv2.approxPolyDP(c,0.009*cv2.arcLength(c,True),True) if cv2.contourArea(approx)>10050 and len(approx)==4: a+=1 M=cv2.moments(approx) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00'])", "#plt.imshow(maske,cmap='gray') #plt.show() #a+=1 toplam_beyaz=cv2.countNonZero(maske) #print(toplam_beyaz,j) if cevap is None or", "#Tekrar var else: pass return False def contour_bul(isim,isim_gri,karmasiklik=0): coord=[] thr6=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8)", "else: continue return coord,thr6 def contour_cizdir(resim,cont,isim=\"default\"): for c in cont:", "#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2) elif abs(x_coords[-1][1]-y)>=35: coord.append(approx) x_coords=[(0,0)] sayac+=1 
x_coords.append((x,y)) #cv2.drawContours(isim,[box],0,(255,0,0),thickness=3) #cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2) else:", "if cevap[0]>500: yanit.append(alfabe[cevap[1]]) elif cevap[0]<600: yanit.append(\" \") for s in", "toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray') #plt.show()", "#areas.append([a,cv2.contourArea(approx)]) #cv2.putText(resim,\"{}\".format(a),(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=4,color=(255,0,0),thickness=3) temp.append(approx.reshape(4,2)) areas.append([a,cv2.contourArea(approx)]) #cv2.drawContours(resim,[approx],0,(255,0,0),thickness=3) #cv2.imshow(\"resim_olge\",imutils.resize(resim,height=650)) if len(temp)>=5: bolgeler={'isim':temp[0],'ogrno':temp[1],'sinav_turu':temp[2],'soru_grubu':temp[3],'ogretim_onay':temp[4],'cevaplar':temp[5]} areas=sorted(areas,key=lambda", "col1=four_point_transform(cevap,pts1) col2=four_point_transform(cevap,pts2) col3=four_point_transform(cevap,pts3) col4=four_point_transform(cevap,pts4) return col1,col2,col3,col4 def cevap_gri(col1,col2,col3,col4): ''' KOLONLARI", "return coord def ters_bul(kagit,areas): ret=False #print(areas[0][0]) if areas[0][0]!=1 and areas[0][1]+areas[1][1]>2300000:", "import imutils import cv2 import matplotlib.pyplot as plt import numpy", "print(isim_str) sinav_dolu=four_point_transform(warp2,bolgeler2['sinav_turu']) sinav_dolu_gri=cv2.cvtColor(sinav_dolu,cv2.COLOR_BGR2GRAY) sinav_turu=sinav_islemleri(sinav_dolu,sinav_dolu_gri,sinav_coord) print(\"Sınav Türü: \",sinav_turu) ogrno_dolu=four_point_transform(warp2,bolgeler2['ogrno']) ogrno_dolu_gri=cv2.cvtColor(ogrno_dolu,cv2.COLOR_BGR2GRAY) ogrno_islemleri(ogrno_dolu,ogrno_dolu_gri,ogrno_coord)", "coords=contours.sort_contours(coords,method=\"left-to-right\")[0] for (s,i) in enumerate(np.arange(0,len(coords),32)): cevap=None cnt=contours.sort_contours(coords[i:i+32],method=\"top-to-bottom\")[0] toplam_beyaz=None for (j,c)", "resim=cv2.imread(dolu_kagit) resim_gri=cv2.cvtColor(resim,cv2.COLOR_BGR2GRAY) warp2,warp2_gri=kagit_bul(resim,resim_gri) bolgeler2,areas2=bolge_bul(warp2,warp2_gri) ret,warp2=ters_bul(warp2,areas2) ''' TERS İSE TEKRAR BOLGELERİ", "sayısı:{basarim[2]}\\nİki cevap işaret:{basarim[3]}\") cv2.waitKey() cv2.destroyAllWindows() if __name__ == '__main__': bos_kagit=\"optic_empty.jpg\"", "box=cv2.boxPoints(box) box=np.array(box,dtype=np.int) M=cv2.moments(box) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) # print(x,y) res=tekrar_bul(x_coords,x) if res", "enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray') #plt.show() toplam_beyaz=cv2.countNonZero(maske) if cevap is", "elif len(cevap_anahtar)<=90: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3) basarim=basarim1+basarim2+basarim3 elif len(cevap_anahtar)<=120: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)", "bolgeler,areas def cevap_islemleri(cevap,coords,col_no=1): iki_cevap=0 bos=0 dogru=0 q_no=0 yanlıs=0 if col_no==1:", "ar<=1.1: box=cv2.minAreaRect(approx) box=cv2.boxPoints(box) box=np.array(box,dtype=np.int) M=cv2.moments(box) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) # 
print(x,y) res=tekrar_bul(x_coords,x)", "coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),20)): cevap=None cnt=contours.sort_contours(coords[i:i+30])[0] toplam_beyaz=None for (j,c)", "thresh=cv2.threshold(sinav_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),10)): cevap=None cnt=contours.sort_contours(coords[i:i+10],method=\"left-to-right\")[0] toplam_beyaz=None for", "#plt.show() toplam_beyaz=cv2.countNonZero(maske) if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) return", "ogrno_bos=four_point_transform(kagit,bolgeler['ogrno']) ogrno_bos_gri=four_point_transform(kagit_gri,bolgeler['ogrno']) ogrno_coord,ogrno_thresh=contour_bul(ogrno_bos,ogrno_bos_gri) contour_cizdir(ogrno_bos_gri,ogrno_coord,\"ogrenci numarası\") #v2.imshow(\"ogrno\",imutils.resize(ogrno_bos,height=400)) ''' DIVIDE ANSWER PART", "pts3=np.array([(600,50),(900,50),(602,1545),(902,1545)]) pts4=np.array([(900,50),(1200,50),(902,1545),(1202,1545)]) col1=four_point_transform(cevap,pts1) col2=four_point_transform(cevap,pts2) col3=four_point_transform(cevap,pts3) col4=four_point_transform(cevap,pts4) return col1,col2,col3,col4 def cevap_gri(col1,col2,col3,col4):", "thres=contour_bul(isim_bos, isim_bos_gri) #contour_cizdir(isim_bos,coord,\"isim_bos\") #cevap_islemleri(cevap_bos_gri,coord) ############################################## resim=cv2.imread(dolu_kagit) resim_gri=cv2.cvtColor(resim,cv2.COLOR_BGR2GRAY) warp2,warp2_gri=kagit_bul(resim,resim_gri) bolgeler2,areas2=bolge_bul(warp2,warp2_gri) ret,warp2=ters_bul(warp2,areas2)", "col1_gri_dolu,col2_gri_dolu,col3_gri_dolu,col4_gri_dolu=cevap_gri(col1_dolu,col2_dolu,col3_dolu,col4_dolu) #contour_cizdir(col1_dolu,col1_coord,\"colon1 dolu\") if len(cevap_anahtar)<=30: basarim=cevap_islemleri(col1_gri_dolu,col1_coord,1) elif len(cevap_anahtar)<=60: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)", "if sayac==5: break print(cevap) if cevap[0]>500: return yanit[cevap[1]] #print(\"tespit edilemedi\")", "thresh=cv2.threshold(cevap,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),5)): cevap=None cnt=contours.sort_contours(coords[i:i+5])[0] toplam_beyaz=None say=0", "box=cv2.boxPoints(box) box=np.array(box,dtype=np.int) if cv2.contourArea(box)>150: coords.append(approx) cv2.drawContours(resim,[box],0,(0,0,255),thickness=3) if len(coords)==5: return coords", "PART INTO 4 SLICES AND FIND ONE BY ONE '''", "/ float(h) if area<1500 and area>250 and ar>=0.9 and ar<=1.1:", "import numpy as np from imutils import contours from imutils.perspective", "def main_starter(bos_kagit,dolu_kagit): image=cv2.imread(bos_kagit) gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) kagit,kagit_gri=kagit_bul(image,gray) bolgeler,areas=bolge_bul(kagit,kagit_gri) ''' FIND SCHOOL NUMBER", "abs(x_coords[-1][1]-y)<35: coord.append(approx) x_coords.append((x,y)) sayac+=1 #cv2.drawContours(isim,[box],0,(255,0,0),thickness=3) #cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2) elif abs(x_coords[-1][1]-y)>=35: coord.append(approx) x_coords=[(0,0)]", "(s,i) in enumerate(np.arange(0,len(coords),32)): cevap=None cnt=contours.sort_contours(coords[i:i+32],method=\"top-to-bottom\")[0] toplam_beyaz=None for (j,c) in enumerate(cnt):", 
"coords=contours.sort_contours(coords,method=\"left-to-right\")[0] for (s,i) in enumerate(np.arange(0,len(coords),10)): cevap=None cnt=contours.sort_contours(coords[i:i+10],method=\"top-to-bottom\")[0] toplam_beyaz=None for (j,c)", "cevap işaret:{basarim[3]}\") cv2.waitKey() cv2.destroyAllWindows() if __name__ == '__main__': bos_kagit=\"optic_empty.jpg\" dolu_kagit=\"optic_marked.jpg\"", "break print(cevap) if cevap[0]>500: return yanit[cevap[1]] #print(\"tespit edilemedi\") return \"Tespit", "x_coords=[(0,0)] sayac=0 cont=imutils.grab_contours(cont) cont=contours.sort_contours(cont,method=\"top-to-bottom\")[0] for c in cont: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx)", "len(approx)==4: #cv2.drawContours(image,[approx],0,(0,255,0),thickness=3) break warp=four_point_transform(image,approx.reshape(4,2)) warp_gri=four_point_transform(gray,approx.reshape(4,2)) return warp,warp_gri def soru_grup_contour_bul(resim,gri): thr2=cv2.threshold(gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1]", "for c in array: if koordinat==c[0] or abs(koordinat-c[0])<15: return True", "print(\"Okul Numarası:\",yanit) def sinav_islemleri(sinav,sinav_gri,coords): yanit=[\"QUİZ\",\"ARA\",\"FİNAL\",\"BÜTÜNLEME\"] thresh=cv2.threshold(sinav_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in", "areas.append([a,cv2.contourArea(approx)]) #cv2.drawContours(resim,[approx],0,(255,0,0),thickness=3) #cv2.imshow(\"resim_olge\",imutils.resize(resim,height=650)) if len(temp)>=5: bolgeler={'isim':temp[0],'ogrno':temp[1],'sinav_turu':temp[2],'soru_grubu':temp[3],'ogretim_onay':temp[4],'cevaplar':temp[5]} areas=sorted(areas,key=lambda x:x[1],reverse=True) return bolgeler,areas", "else: return ret,kagit def kagit_bul(image,gray): thr=cv2.threshold(gray,150,255,cv2.THRESH_BINARY)[1] contour=cv2.findContours(thr,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) contour=imutils.grab_contours(contour) contour=sorted(contour,key=cv2.contourArea,reverse=True) for", "''' return(dogru,yanlıs,bos,iki_cevap) def isim_islemleri(isim,coords,thresh): a=0 yanit=[] ad_str=\"\" coords=contours.sort_contours(coords,method=\"left-to-right\")[0] for (s,i)", "ogret_onay=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogret_cont) print(\"Öğretim Onayı:\",ogret_onay) #cv2.drawContours(ogretim_dolu,ogret_cont,-1,(255,0,0),thickness=3) #cv2.imshow(\"ogretc\",ogretim_dolu) #ogretim_onayı=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogretimonay_coord) sorugrup_dolu=four_point_transform(warp2,bolgeler2['soru_grubu']) sorugrup_dolu_gri=cv2.cvtColor(sorugrup_dolu,cv2.COLOR_BGR2GRAY) soru_tur=sorugrup_islemleri(sorugrup_dolu,sorugrup_dolu_gri,soru_cont) print(\"Soru", "M=cv2.moments(box) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) # print(x,y) res=tekrar_bul(x_coords,x) if res is False", "sinav_dolu_gri=cv2.cvtColor(sinav_dolu,cv2.COLOR_BGR2GRAY) sinav_turu=sinav_islemleri(sinav_dolu,sinav_dolu_gri,sinav_coord) print(\"Sınav Türü: \",sinav_turu) ogrno_dolu=four_point_transform(warp2,bolgeler2['ogrno']) ogrno_dolu_gri=cv2.cvtColor(ogrno_dolu,cv2.COLOR_BGR2GRAY) ogrno_islemleri(ogrno_dolu,ogrno_dolu_gri,ogrno_coord) cevap_dolu=four_point_transform(warp2,bolgeler2['cevaplar']) cevap_dolu_gri=cv2.cvtColor(cevap_dolu,cv2.COLOR_BGR2GRAY)", "contour=contours.sort_contours(contour,method=\"top-to-bottom\")[0] for c in contour: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y, w,", "toplam_beyaz>800: say+=1 if 
# Optical-mark-recognition grader for scanned exam answer sheets (OpenCV + imutils).
import cv2
import matplotlib.pyplot as plt
import numpy as np
from imutils import contours
from imutils.perspective import four_point_transform, order_points
import imutils

# Answer key: question index -> correct option index (0='A', 1='B', ...).
cevap_anahtar={0:2,1:1,2:2,3:3,4:1,5:4,6:4,7:3,8:1,9:1,10:0,11:0,12:2,13:1,14:2,15:3,16:4,17:4,18:4,19:3,20:2,21:1,22:0,23:0,24:0,25:4,26:2,27:3,28:4,29:4,30:4,31:3,32:2,33:1,34:0,35:0,36:1,37:2,38:3,39:4}
# Turkish alphabet used to decode the name grid (row index -> letter).
alfabe={0:'A',1:'B',2:'C',3:'Ç',4:'D',5:'E',6:'F',7:'G',8:'Ğ',9:'H',10:'I',11:'İ',12:'J',13:'K',14:'L',15:'M',16:'N',17:'O',18:'Ö',19:'P',20:'Q',21:'R',22:'S',23:'Ş',24:'T',25:'U',26:'Ü',27:'V',28:'W',29:'Y',30:'Z',31:'X'}

# NOTE: this early version of cevap_islemleri is shadowed by the full
# implementation defined further down; it is kept because it exists in the
# source, but it is effectively dead code (it computes but never returns).
def cevap_islemleri(isim,coords):
    a=0
    thresh=cv2.threshold(isim,179,255,cv2.THRESH_BINARY_INV)[1]
    coords=contours.sort_contours(coords,method="top-to-bottom")[0]
    for (s,i) in enumerate(np.arange(0,len(coords),20)):
        cevap=None
        cnt=contours.sort_contours(coords[i:i+30])[0]
        toplam_beyaz=None
        for (j,c) in enumerate(cnt):
            maske=np.zeros(thresh.shape,dtype=np.uint8)
            cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
            maske=cv2.bitwise_and(thresh,thresh,mask=maske)
            a+=1
            toplam_beyaz=cv2.countNonZero(maske)
            if cevap is None or toplam_beyaz>cevap[0]:
                cevap=(toplam_beyaz,j,s)

def cevap_contour_bul(isim,isim_gri):
    # Find the answer bubbles in one answer column, top to bottom.
    coord=[]
    thresholded=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8)
    contour=cv2.findContours(thresholded,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
    x_coords=[(0,0)]
    sayac=0
    contour=imutils.grab_contours(contour)
    contour=contours.sort_contours(contour,method="top-to-bottom")[0]
    for c in contour:
        approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True)
        area=cv2.contourArea(approx)
        (x, y, w, h)=cv2.boundingRect(approx)
        ar=w/float(h)
        # Keep roughly square contours whose area matches a bubble.
        if area<1500 and area>250 and ar>=0.9 and ar<=1.1:
            box=cv2.minAreaRect(approx)
            box=cv2.boxPoints(box)
            box=np.array(box,dtype=int)  # np.int is removed in current NumPy
            M=cv2.moments(box)
            x=int(M['m10']/M['m00'])
            y=int(M['m01']/M['m00'])
            res=tekrar_bul(x_coords,x)
            if res is False and abs(x_coords[-1][1]-y)<35:
                coord.append(approx)
                x_coords.append((x,y))
                sayac+=1
                #cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)
                #cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)
            elif abs(x_coords[-1][1]-y)>=35:
                # New bubble row: reset the seen x-coordinates.
                coord.append(approx)
                x_coords=[(0,0)]
                sayac+=1
                x_coords.append((x,y))
        else:
            continue
    return coord
def ters_bul(kagit,areas):
    # Detect an upside-down sheet and rotate it back.
    ret=False
    #print(areas[0][0])
    if areas[0][0]!=1 and areas[0][1]+areas[1][1]>2300000:
        kagit=imutils.rotate(kagit,angle=180)
        print("Kağıdı ters koymuşsunuz,çevrildi")  # "You placed the paper upside down; it was rotated"
        ret=True
        return ret,kagit
    else:
        return ret,kagit

def kagit_bul(image,gray):
    # Find the sheet itself: largest 4-point contour, then warp it flat.
    thr=cv2.threshold(gray,150,255,cv2.THRESH_BINARY)[1]
    contour=cv2.findContours(thr,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    contour=imutils.grab_contours(contour)
    contour=sorted(contour,key=cv2.contourArea,reverse=True)
    for c in contour:
        approx=cv2.approxPolyDP(c,0.02*cv2.arcLength(c,True),True)
        if len(approx)==4:
            #cv2.drawContours(image,[approx],0,(0,255,0),thickness=3)
            break
    warp=four_point_transform(image,approx.reshape(4,2))
    warp_gri=four_point_transform(gray,approx.reshape(4,2))
    return warp,warp_gri

def soru_grup_contour_bul(resim,gri):
    thr2=cv2.threshold(gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1]
    can=cv2.Canny(thr2,50,100)
    can=cv2.dilate(can,None,iterations=3)
    coords=[]
    cont=cv2.findContours(can,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    cont=imutils.grab_contours(cont)
    for c in cont:
        approx=cv2.approxPolyDP(c,0.02*cv2.arcLength(c,True),True)  # epsilon lost in the source; 0.02 assumed
        (x, y, w, h)=cv2.boundingRect(c)
        ar=w/float(h)
        if cv2.contourArea(c)>30 and ar>=0.9 and ar<=1.1:
            box=cv2.minAreaRect(approx)
            box=cv2.boxPoints(box)
            box=np.array(box,dtype=int)
            if cv2.contourArea(box)>150:
                coords.append(approx)
                cv2.drawContours(resim,[box],0,(0,0,255),thickness=3)
    if len(coords)==5:
        return coords
    else:
        return 0

def tekrar_bul(array,koordinat):
    # True if this x-coordinate was already recorded (duplicate bubble).
    for c in array:
        if koordinat==c[0]:  # the source OR'ed a second condition here that could not be recovered
            return True  # duplicate found
        else:
            pass
    return False

def contour_bul(isim,isim_gri,karmasiklik=0):
    coord=[]
    thr6=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8)
    #thr6=cv2.threshold(isim_gri,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
    ar_value=200
    #if karmasiklik==1:
    #    ar_value=800
    cont=cv2.findContours(thr6,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
    x_coords=[(0,0)]
    sayac=0
    cont=imutils.grab_contours(cont)
    cont=contours.sort_contours(cont,method="top-to-bottom")[0]
    for c in cont:
        approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True)
        area=cv2.contourArea(approx)
        (x, y, w, h)=cv2.boundingRect(approx)
        ar=w/float(h)
        if area<1300 and area>300 and ar>=0.9 and ar<=1.1:
            box=cv2.minAreaRect(approx)
            box=cv2.boxPoints(box)
            box=np.array(box,dtype=int)
            M=cv2.moments(box)
            x=int(M['m10']/M['m00'])
            y=int(M['m01']/M['m00'])
            # print(x,y)
            res=tekrar_bul(x_coords,x)
            if res is False and abs(x_coords[-1][1]-y)<35:
                coord.append(approx)
                x_coords.append((x,y))
                sayac+=1
                #cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)
                #cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)
            elif abs(x_coords[-1][1]-y)>=35:
                coord.append(approx)
                x_coords=[(0,0)]
                sayac+=1
                x_coords.append((x,y))
        else:
            continue
    return coord,thr6

def contour_cizdir(resim,cont,isim="default"):
    # Draw the found contours for visual debugging.
    for c in cont:
        cv2.drawContours(resim,[c],0,(0,255,0),thickness=4)
    #print(f"Bulunan contour sayısı: {len(cont)}")  # "number of contours found"
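# --- Illustrative sketch (not part of the original script) --------------------
# contour_bul and cevap_contour_bul above keep a contour only when it is
# roughly square (aspect ratio 0.9–1.1) and its area falls in a bubble-sized
# band. The helper below distills that test; the name `is_bubble` and its
# default bounds are illustrative assumptions, not names from the source.
def is_bubble(c, area_min=300, area_max=1300):
    area = cv2.contourArea(c)
    x, y, w, h = cv2.boundingRect(c)
    ar = w / float(h)
    return area_min < area < area_max and 0.9 <= ar <= 1.1
# ------------------------------------------------------------------------------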
def bolge_bul(resim,gri):
    # Locate the six big boxes on the sheet (name, student number, exam type,
    # question group, instructor approval, answers), sorted top to bottom.
    bolgeler={}
    thr2=cv2.adaptiveThreshold(gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8)
    areas=[]
    cont=cv2.findContours(thr2,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    cont=imutils.grab_contours(cont)
    temp=[]
    cont=contours.sort_contours(cont,"top-to-bottom")[0]
    a=0
    for c in cont:
        approx=cv2.approxPolyDP(c,0.009*cv2.arcLength(c,True),True)
        if cv2.contourArea(approx)>10050 and len(approx)==4:
            a+=1
            M=cv2.moments(approx)
            x=int(M['m10']/M['m00'])
            y=int(M['m01']/M['m00'])
            #cv2.putText(resim,"{}".format(a),(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=4,color=(255,0,0),thickness=3)
            temp.append(approx.reshape(4,2))
            areas.append([a,cv2.contourArea(approx)])
            #cv2.drawContours(resim,[approx],0,(255,0,0),thickness=3)
    #cv2.imshow("resim_olge",imutils.resize(resim,height=650))
    if len(temp)>=6:  # was >=5 in the source, which would index temp[5] out of range
        bolgeler={'isim':temp[0],'ogrno':temp[1],'sinav_turu':temp[2],'soru_grubu':temp[3],'ogretim_onay':temp[4],'cevaplar':temp[5]}
    areas=sorted(areas,key=lambda x:x[1],reverse=True)
    return bolgeler,areas

def cevap_islemleri(cevap,coords,col_no=1):
    # Grade one answer column of 30 questions, 5 bubbles per row.
    iki_cevap=0
    bos=0
    dogru=0
    q_no=0
    yanlıs=0
    if col_no==1:
        pass
    elif col_no==2:
        q_no=30
    elif col_no==3:
        q_no=60
    elif col_no==4:
        q_no=90
    yanit=[]
    #cevap=cv2.cvtColor(cevap,cv2.COLOR_BGR2GRAY)
    thresh=cv2.threshold(cevap,180,255,cv2.THRESH_BINARY_INV)[1]
    coords=contours.sort_contours(coords,method="top-to-bottom")[0]
    for (s,i) in enumerate(np.arange(0,len(coords),5)):
        cevap=None
        cnt=contours.sort_contours(coords[i:i+5])[0]
        toplam_beyaz=None
        say=0
        for (j,c) in enumerate(cnt):
            if len(cevap_anahtar)<=q_no+s:
                return (dogru,yanlıs,bos,iki_cevap)
            maske=np.zeros(thresh.shape,dtype=np.uint8)
            cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
            maske=cv2.bitwise_and(thresh,thresh,mask=maske)
            plt.imshow(maske,cmap='gray')
            #plt.show()
            toplam_beyaz=cv2.countNonZero(maske)
            #print(toplam_beyaz,j)
            if cevap is None or toplam_beyaz>cevap[0]:
                cevap=(toplam_beyaz,j,q_no+s)
            if toplam_beyaz>800:
                say+=1
        if say>1:  # more than one option marked for this question
            iki_cevap+=1
            continue
        elif cevap[0]<800:  # question left blank
            bos+=1
            continue
        else:
            if cevap_anahtar[q_no+s]==cevap[1]:
                #print(cevap_anahtar[q_no+s],cevap[1])
                dogru+=1
            else:
                yanlıs+=1
    # Counts of correct, wrong, blank and double-marked answers.
    return (dogru,yanlıs,bos,iki_cevap)
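# --- Illustrative sketch (not part of the original script) --------------------
# Every *_islemleri function decides whether a bubble is marked the same way:
# render the bubble contour as a filled mask, AND it with the inverted-threshold
# sheet, and count the white pixels. A self-contained synthetic example:
def _mask_demo():
    img = np.zeros((100, 100), dtype=np.uint8)
    cv2.circle(img, (50, 50), 20, 255, thickness=-1)  # a fully "penciled" bubble
    cnts = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    maske = np.zeros(img.shape, dtype=np.uint8)
    cv2.drawContours(maske, cnts, 0, 255, thickness=-1)
    marked = cv2.bitwise_and(img, img, mask=maske)
    # Roughly pi*20^2 ≈ 1250 white pixels here; the script compares such
    # counts against thresholds around 500–800 to call a bubble marked.
    print(cv2.countNonZero(marked))
# ------------------------------------------------------------------------------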
def isim_islemleri(isim,coords,thresh):
    # Decode the name grid: each column of 32 letter bubbles yields one letter.
    a=0
    yanit=[]
    ad_str=""
    for (s,i) in enumerate(np.arange(0,len(coords),32)):
        cevap=None
        cnt=contours.sort_contours(coords[i:i+32],method="top-to-bottom")[0]
        toplam_beyaz=None
        for (j,c) in enumerate(cnt):
            maske=np.zeros(thresh.shape,dtype=np.uint8)
            cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
            maske=cv2.bitwise_and(thresh,thresh,mask=maske)
            #plt.imshow(maske,cmap='gray')
            #plt.show()
            #a+=1
            toplam_beyaz=cv2.countNonZero(maske)
            #print(toplam_beyaz,j)
            if cevap is None or toplam_beyaz>cevap[0]:
                cevap=(toplam_beyaz,j,s)
        # print("cevap",cevap)
        if cevap[0]>500:
            yanit.append(alfabe[cevap[1]])
        elif cevap[0]<600:
            yanit.append(" ")
    for s in yanit:
        ad_str+=s
    return ad_str

def cevap_kolon(cevap):
    # Slice the answer region into its four 30-question columns.
    pts1=np.array([(2,50),(300,50),(2,1545),(300,1545)])
    pts2=np.array([(300,50),(600,50),(302,1545),(602,1545)])
    pts3=np.array([(600,50),(900,50),(602,1545),(902,1545)])
    pts4=np.array([(900,50),(1200,50),(902,1545),(1202,1545)])
    col1=four_point_transform(cevap,pts1)
    col2=four_point_transform(cevap,pts2)
    col3=four_point_transform(cevap,pts3)
    col4=four_point_transform(cevap,pts4)
    return col1,col2,col3,col4

def cevap_gri(col1,col2,col3,col4):
    '''
    Convert the columns to grayscale (kept out of main to reduce clutter).
    '''
    col1_gri=cv2.cvtColor(col1,cv2.COLOR_BGR2GRAY)
    col2_gri=cv2.cvtColor(col2,cv2.COLOR_BGR2GRAY)
    col3_gri=cv2.cvtColor(col3,cv2.COLOR_BGR2GRAY)
    col4_gri=cv2.cvtColor(col4,cv2.COLOR_BGR2GRAY)
    return col1_gri,col2_gri,col3_gri,col4_gri

def cevap_contour(col1,col2,col3,col4):
    col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4)
    # The source passes col1_gri for every column; all four columns share the
    # same bubble grid, so column 1's grayscale stands in for the others.
    col1_coord=cevap_contour_bul(col1,col1_gri)
    col2_coord=cevap_contour_bul(col2,col1_gri)
    col3_coord=cevap_contour_bul(col3,col1_gri)
    col4_coord=cevap_contour_bul(col4,col1_gri)
    return col1_coord,col2_coord,col3_coord,col4_coord

def ogrno_islemleri(ogrno,ogrno_gri,coords):
    # Read the student number: one digit per column of ten bubbles.
    yanit=""
    thresh=cv2.threshold(ogrno_gri,180,255,cv2.THRESH_BINARY_INV)[1]
    coords=contours.sort_contours(coords,method="left-to-right")[0]
    for (s,i) in enumerate(np.arange(0,len(coords),10)):
        cevap=None
        cnt=contours.sort_contours(coords[i:i+10],method="top-to-bottom")[0]
        toplam_beyaz=None
        for (j,c) in enumerate(cnt):
            maske=np.zeros(thresh.shape,dtype=np.uint8)
            cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
            maske=cv2.bitwise_and(thresh,thresh,mask=maske)
            plt.imshow(maske,cmap='gray')
            #plt.show()
            toplam_beyaz=cv2.countNonZero(maske)
            if cevap is None or toplam_beyaz>cevap[0]:
                cevap=(toplam_beyaz,j,s)
        if cevap[0]>500:
            yanit+=str(cevap[1])
    print("Okul Numarası:",yanit)  # "School number"

def sinav_islemleri(sinav,sinav_gri,coords):
    # Read the exam type (quiz / midterm / final / make-up).
    yanit=["QUİZ","ARA","FİNAL","BÜTÜNLEME"]
    thresh=cv2.threshold(sinav_gri,180,255,cv2.THRESH_BINARY_INV)[1]
    coords=contours.sort_contours(coords,method="top-to-bottom")[0]
    for (s,i) in enumerate(np.arange(0,len(coords),10)):
        cevap=None
        cnt=contours.sort_contours(coords[i:i+10],method="left-to-right")[0]
        toplam_beyaz=None
        for (j,c) in enumerate(cnt):
            maske=np.zeros(thresh.shape,dtype=np.uint8)
            cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
            maske=cv2.bitwise_and(thresh,thresh,mask=maske)
            plt.imshow(maske,cmap='gray')
            #plt.show()
            toplam_beyaz=cv2.countNonZero(maske)
            if cevap is None or toplam_beyaz>cevap[0]:
                cevap=(toplam_beyaz,j,s)
    return yanit[cevap[1]]

def sorugrup_islemleri(soru,soru_gri,coords):
    # Read a single A–E selection (question group / instructor approval).
    yanit=["A","B","C","D","E"]
    sayac=0
    thresh=cv2.threshold(soru_gri,180,255,cv2.THRESH_BINARY_INV)[1]
    coords=contours.sort_contours(coords,method="top-to-bottom")[0]
    for (s,i) in enumerate(np.arange(0,len(coords),10)):
        cevap=None
        cnt=contours.sort_contours(coords[i:i+10],method="left-to-right")[0]
        toplam_beyaz=None
        for (j,c) in enumerate(cnt):
            maske=np.zeros(thresh.shape,dtype=np.uint8)
            cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
            maske=cv2.bitwise_and(thresh,thresh,mask=maske)
            plt.imshow(maske,cmap='gray')
            #plt.show()
            sayac+=1
            toplam_beyaz=cv2.countNonZero(maske)
            if cevap is None or toplam_beyaz>cevap[0]:
                cevap=(toplam_beyaz,j,s)
            if sayac==5:
                break
    print(cevap)
    if cevap[0]>500:
        return yanit[cevap[1]]
    #print("tespit edilemedi")
    return "Tespit edilemedi"  # "could not be detected"
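# --- Illustrative note (not part of the original script) ----------------------
# cevap_islemleri returns a 4-tuple of counts per column. The source merged the
# 3- and 4-column cases with plain tuple "+", which concatenates rather than
# sums; main_starter below therefore sums element-wise instead. Made-up values
# just to show the difference:
#   a = (10, 3, 1, 0); b = (8, 5, 2, 1)
#   a + b                               -> (10, 3, 1, 0, 8, 5, 2, 1)  concatenation
#   tuple(x + y for x, y in zip(a, b))  -> (18, 8, 3, 1)              element-wise sum
# ------------------------------------------------------------------------------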
####################################################################
def main_starter(bos_kagit,dolu_kagit):
    # Pass 1: locate every region on the blank reference sheet.
    image=cv2.imread(bos_kagit)
    gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    kagit,kagit_gri=kagit_bul(image,gray)
    bolgeler,areas=bolge_bul(kagit,kagit_gri)
    '''
    FIND SCHOOL NUMBER PART
    '''
    ogrno_bos=four_point_transform(kagit,bolgeler['ogrno'])
    ogrno_bos_gri=four_point_transform(kagit_gri,bolgeler['ogrno'])
    ogrno_coord,ogrno_thresh=contour_bul(ogrno_bos,ogrno_bos_gri)
    contour_cizdir(ogrno_bos_gri,ogrno_coord,"ogrenci numarası")
    #cv2.imshow("ogrno",imutils.resize(ogrno_bos,height=400))
    '''
    DIVIDE ANSWER PART INTO 4 SLICES AND FIND ONE BY ONE
    '''
    cevap_bos=four_point_transform(kagit,bolgeler['cevaplar'])
    cevap_bos_gri=four_point_transform(kagit_gri,bolgeler['cevaplar'])
    col1,col2,col3,col4=cevap_kolon(cevap_bos)
    col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4)
    col1_coord,col2_coord,col3_coord,col4_coord=cevap_contour(col1,col2,col3,col4)
    #contour_cizdir(col1,col1_coord)
    '''
    EXAM TYPE FIND PART
    '''
    sinav_bos=four_point_transform(kagit,bolgeler['sinav_turu'])
    sinav_bos_gri=four_point_transform(kagit_gri,bolgeler['sinav_turu'])
    sinav_coord,sinav_thresh=contour_bul(sinav_bos,sinav_bos_gri)
    sinav_islemleri(sinav_bos,sinav_bos_gri,sinav_coord)
    #cv2.imshow("sınav türü",sinav_bos_gri)
    '''
    OTHER PARTS THAT ARE ON THE PAPER
    '''
    sorugrup_bos=four_point_transform(kagit,bolgeler['soru_grubu'])
    sorugrup_bos_gri=four_point_transform(kagit_gri,bolgeler['soru_grubu'])
    sorugrup_coord,sorugrup_thresh=contour_bul(sorugrup_bos,sorugrup_bos_gri,1)
    coors=soru_grup_contour_bul(sorugrup_bos,sorugrup_bos_gri)
    soru_cont,soru_thr=contour_bul(sorugrup_bos,sorugrup_bos_gri,1)
    ###############################
    ogretim_bos=four_point_transform(kagit,bolgeler['ogretim_onay'])
    ogretim_bos_gri=four_point_transform(kagit_gri,bolgeler['ogretim_onay'])
    ogret_cont,ogret_thr=contour_bul(ogretim_bos,ogretim_bos_gri,1)
    '''
    NAME FIND PART
    '''
    isim_bos=four_point_transform(kagit,bolgeler['isim'])
    isim_bos_gri=cv2.cvtColor(isim_bos,cv2.COLOR_BGR2GRAY)
    coord_isim, thres=contour_bul(isim_bos, isim_bos_gri)
    #contour_cizdir(isim_bos,coord_isim,"isim_bos")
    #cevap_islemleri(cevap_bos_gri,coord_isim)
    ##############################################
    # Pass 2: read the filled sheet using the reference coordinates.
    resim=cv2.imread(dolu_kagit)
    resim_gri=cv2.cvtColor(resim,cv2.COLOR_BGR2GRAY)
    warp2,warp2_gri=kagit_bul(resim,resim_gri)
    bolgeler2,areas2=bolge_bul(warp2,warp2_gri)
    ret,warp2=ters_bul(warp2,areas2)
    '''
    IF THE SHEET WAS UPSIDE DOWN, FIND THE REGIONS AGAIN
    '''
    if ret==True:
        warp2_gri=cv2.cvtColor(warp2,cv2.COLOR_BGR2GRAY)
        bolgeler2,areas2=bolge_bul(warp2,warp2_gri)
    else:
        pass
    isim_dolu=four_point_transform(warp2,bolgeler2['isim'])
    isim_dolu_gri=cv2.cvtColor(isim_dolu,cv2.COLOR_BGR2GRAY)
    contour_cizdir(isim_dolu,coord_isim,"dolu_kagit_contourlu")
    '''
    INSTRUCTOR APPROVAL ON THE FILLED SHEET
    '''
    ogretim_dolu=four_point_transform(warp2,bolgeler2['ogretim_onay'])
    ogretim_dolu_gri=cv2.cvtColor(ogretim_dolu,cv2.COLOR_BGR2GRAY)
    ogret_onay=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogret_cont)
    print("Öğretim Onayı:",ogret_onay)  # "Instructor approval"
    #cv2.drawContours(ogretim_dolu,ogret_cont,-1,(255,0,0),thickness=3)
    #cv2.imshow("ogretc",ogretim_dolu)
    #ogretim_onayı=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogretimonay_coord)
    sorugrup_dolu=four_point_transform(warp2,bolgeler2['soru_grubu'])
    sorugrup_dolu_gri=cv2.cvtColor(sorugrup_dolu,cv2.COLOR_BGR2GRAY)
    soru_tur=sorugrup_islemleri(sorugrup_dolu,sorugrup_dolu_gri,soru_cont)
    print("Soru Grubu",soru_tur)  # "Question group"
    thresh_dolu=cv2.threshold(isim_dolu_gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1]
    isim_str=isim_islemleri(isim_dolu_gri,coord_isim,thresh_dolu)
    print(isim_str)
    sinav_dolu=four_point_transform(warp2,bolgeler2['sinav_turu'])
    sinav_dolu_gri=cv2.cvtColor(sinav_dolu,cv2.COLOR_BGR2GRAY)
    sinav_turu=sinav_islemleri(sinav_dolu,sinav_dolu_gri,sinav_coord)
    print("Sınav Türü: ",sinav_turu)  # "Exam type"
    ogrno_dolu=four_point_transform(warp2,bolgeler2['ogrno'])
    ogrno_dolu_gri=cv2.cvtColor(ogrno_dolu,cv2.COLOR_BGR2GRAY)
    ogrno_islemleri(ogrno_dolu,ogrno_dolu_gri,ogrno_coord)
    cevap_dolu=four_point_transform(warp2,bolgeler2['cevaplar'])
    cevap_dolu_gri=cv2.cvtColor(cevap_dolu,cv2.COLOR_BGR2GRAY)
    col1_dolu,col2_dolu,col3_dolu,col4_dolu=cevap_kolon(cevap_dolu)
    col1_gri_dolu,col2_gri_dolu,col3_gri_dolu,col4_gri_dolu=cevap_gri(col1_dolu,col2_dolu,col3_dolu,col4_dolu)
    #contour_cizdir(col1_dolu,col1_coord,"colon1 dolu")
    # Grade only as many columns as the answer key needs; counts are summed
    # element-wise (the source concatenated the tuples with "+" here).
    if len(cevap_anahtar)<=30:
        basarim=cevap_islemleri(col1_gri_dolu,col1_coord,1)
    elif len(cevap_anahtar)<=60:
        basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1)
        basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)
        basarim=(basarim1[0]+basarim2[0],basarim1[1]+basarim2[1],basarim1[2]+basarim2[2],basarim1[3]+basarim2[3])
        #print(basarim)
    elif len(cevap_anahtar)<=90:
        basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1)
        basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)
        basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3)
        basarim=tuple(a+b+c for a,b,c in zip(basarim1,basarim2,basarim3))
    elif len(cevap_anahtar)<=120:
        basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1)
        basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)
        basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3)
        basarim4=cevap_islemleri(col4_gri_dolu,col4_coord,4)
        basarim=tuple(a+b+c+d for a,b,c,d in zip(basarim1,basarim2,basarim3,basarim4))
    print(f"Doğru cevap sayısı:{basarim[0]}\nYanlış cevap sayısı:{basarim[1]}\nBoş sayısı:{basarim[2]}\nİki cevap işaret:{basarim[3]}")
    cv2.waitKey()
    cv2.destroyAllWindows()
"edilemedi\" #################################################################### def main_starter(bos_kagit,dolu_kagit): image=cv2.imread(bos_kagit) gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) kagit,kagit_gri=kagit_bul(image,gray) bolgeler,areas=bolge_bul(kagit,kagit_gri) ''' FIND", "contour=imutils.grab_contours(contour) contour=sorted(contour,key=cv2.contourArea,reverse=True) for c in contour: approx=cv2.approxPolyDP(c,0.02*cv2.arcLength(c,True),True) if len(approx)==4: #cv2.drawContours(image,[approx],0,(0,255,0),thickness=3)", "cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) def cevap_contour_bul(isim,isim_gri): coord=[] thresholded=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8)", "s in yanit: ad_str+=s return ad_str def cevap_kolon(cevap): pts1=np.array([(2,50),(300,50),(2,1545),(300,1545)]) pts2=np.array([(300,50),(600,50),(302,1545),(602,1545)])", "#contour_cizdir(col1,col1_coord) #cevap_islemleri(col2_gri,coord_cevap) ''' EXAM TYPE FIND PART ''' sinav_bos=four_point_transform(kagit,bolgeler['sinav_turu']) sinav_bos_gri=four_point_transform(kagit_gri,bolgeler['sinav_turu'])", "toplam_beyaz=None say=0 for (j,c) in enumerate(cnt): if len(cevap_anahtar)<=q_no+s: return (dogru,yanlıs,bos,iki_cevap)", "''' cevap_bos=four_point_transform(kagit,bolgeler['cevaplar']) cevap_bos_gri=four_point_transform(kagit_gri,bolgeler['cevaplar']) col1,col2,col3,col4=cevap_kolon(cevap_bos) col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4) col1_coord,col2_coord,col3_coord,col4_coord=cevap_contour(col1,col2,col3,col4) #contour_cizdir(col1,col1_coord) #cevap_islemleri(col2_gri,coord_cevap) ''' EXAM", "BUL ''' if ret==True: warp2_gri=cv2.cvtColor(warp2,cv2.COLOR_BGR2GRAY) bolgeler2,areas2=bolge_bul(warp2,warp2_gri) else: pass isim_dolu=four_point_transform(warp2,bolgeler2['isim']) isim_dolu_gri=cv2.cvtColor(isim_dolu,cv2.COLOR_BGR2GRAY)", "cevap_gri(col1,col2,col3,col4): ''' KOLONLARI GRİ YAPMAK İÇİN,MAİNDE YER KAPLAMASIN ''' col1_gri=cv2.cvtColor(col1,cv2.COLOR_BGR2GRAY)", "x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) #areas.append([a,cv2.contourArea(approx)]) #cv2.putText(resim,\"{}\".format(a),(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=4,color=(255,0,0),thickness=3) temp.append(approx.reshape(4,2)) areas.append([a,cv2.contourArea(approx)]) #cv2.drawContours(resim,[approx],0,(255,0,0),thickness=3) #cv2.imshow(\"resim_olge\",imutils.resize(resim,height=650)) if len(temp)>=5:", "if areas[0][0]!=1 and areas[0][1]+areas[1][1]>2300000: kagit=imutils.rotate(kagit,angle=180) print(\"Kağıdı ters koymuşsunuz,çevrildi\") ret=True return", "len(cevap_anahtar)<=120: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3) basarim4=cevap_islemleri(col4_gri_dolu,col4_coord,4) basarim=basarim1+basarim2+basarim3+basarim4 print(f\"Doğru cevap sayısı:{basarim[0]}\\nYanlış cevap", "THAT ON PAPER ''' sorugrup_bos=four_point_transform(kagit,bolgeler['soru_grubu']) sorugrup_bos_gri=four_point_transform(kagit_gri,bolgeler['soru_grubu']) sorugrup_coord,sorugrup_thresh=contour_bul(sorugrup_bos,sorugrup_bos_gri,1) coors=soru_grup_contour_bul(sorugrup_bos,sorugrup_bos_gri) soru_cont,soru_thr=contour_bul(sorugrup_bos,sorugrup_bos_gri,1) ###############################", "as np from imutils import contours from imutils.perspective import four_point_transform", "return ret,kagit def kagit_bul(image,gray): 
thr=cv2.threshold(gray,150,255,cv2.THRESH_BINARY)[1] contour=cv2.findContours(thr,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) contour=imutils.grab_contours(contour) contour=sorted(contour,key=cv2.contourArea,reverse=True) for c", "if cevap[0]>500: return yanit[cevap[1]] #print(\"tespit edilemedi\") return \"Tespit edilemedi\" ####################################################################", "YAPMAK İÇİN,MAİNDE YER KAPLAMASIN ''' col1_gri=cv2.cvtColor(col1,cv2.COLOR_BGR2GRAY) col2_gri=cv2.cvtColor(col2,cv2.COLOR_BGR2GRAY) col3_gri=cv2.cvtColor(col3,cv2.COLOR_BGR2GRAY) col4_gri=cv2.cvtColor(col4,cv2.COLOR_BGR2GRAY) return", "(x, y, w, h) = cv2.boundingRect(approx) ar = w /", "c in cont: cv2.drawContours(resim,[c],0,(0,255,0),thickness=4) #print(f\"Bulunan contour sayısı: {len(cont)}\") def bolge_bul(resim,gri):", "elif col_no==2: q_no=30 elif col_no==3: q_no=60 elif col_no==4: q_no=90 yanit=[]", "OTHER PARTS THAT ON PAPER ''' sorugrup_bos=four_point_transform(kagit,bolgeler['soru_grubu']) sorugrup_bos_gri=four_point_transform(kagit_gri,bolgeler['soru_grubu']) sorugrup_coord,sorugrup_thresh=contour_bul(sorugrup_bos,sorugrup_bos_gri,1) coors=soru_grup_contour_bul(sorugrup_bos,sorugrup_bos_gri)", "enumerate(np.arange(0,len(coords),5)): cevap=None cnt=contours.sort_contours(coords[i:i+5])[0] toplam_beyaz=None say=0 for (j,c) in enumerate(cnt): if", "cnt=contours.sort_contours(coords[i:i+30])[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) a+=1", "main_starter(bos_kagit,dolu_kagit): image=cv2.imread(bos_kagit) gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) kagit,kagit_gri=kagit_bul(image,gray) bolgeler,areas=bolge_bul(kagit,kagit_gri) ''' FIND SCHOOL NUMBER PART", "or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) if cevap[0]>500: yanit+=str(cevap[1]) print(\"Okul Numarası:\",yanit) def sinav_islemleri(sinav,sinav_gri,coords):", "cont=imutils.grab_contours(cont) temp=[] cont=contours.sort_contours(cont,\"top-to-bottom\")[0] a=0 for c in cont: approx=cv2.approxPolyDP(c,0.009*cv2.arcLength(c,True),True) if", "len(approx)==4: a+=1 M=cv2.moments(approx) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) #areas.append([a,cv2.contourArea(approx)]) #cv2.putText(resim,\"{}\".format(a),(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=4,color=(255,0,0),thickness=3) temp.append(approx.reshape(4,2)) areas.append([a,cv2.contourArea(approx)]) #cv2.drawContours(resim,[approx],0,(255,0,0),thickness=3)", "#cv2.imshow(\"sınav türü\",sinav_bos_gri) ''' OTHER PARTS THAT ON PAPER ''' sorugrup_bos=four_point_transform(kagit,bolgeler['soru_grubu'])", "areas=sorted(areas,key=lambda x:x[1],reverse=True) return bolgeler,areas def cevap_islemleri(cevap,coords,col_no=1): iki_cevap=0 bos=0 dogru=0 q_no=0", "maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray') #plt.show() toplam_beyaz=cv2.countNonZero(maske) if cevap is None or toplam_beyaz>cevap[0]:", "var else: pass return False def contour_bul(isim,isim_gri,karmasiklik=0): coord=[] thr6=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) #thr6=cv2.threshold(isim_gri,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]", "y=int(M['m01']/M['m00']) # print(x,y) res=tekrar_bul(x_coords,x) if res is False and abs(x_coords[-1][1]-y)<35:", "# ar_value=800 cont=cv2.findContours(thr6,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE) x_coords=[(0,0)] sayac=0 cont=imutils.grab_contours(cont) 
cont=contours.sort_contours(cont,method=\"top-to-bottom\")[0] for c in", "continue else: if cevap_anahtar[q_no+s]== cevap[1]: #print(cevap_anahtar[q_no+s],cevap[1]) dogru+=1 else: yanlıs+=1 '''", "maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray') #plt.show() toplam_beyaz=cv2.countNonZero(maske) #print(toplam_beyaz,j) if cevap is None or", "tekrar_bul(array,koordinat): for c in array: if koordinat==c[0] or abs(koordinat-c[0])<15: return", "if say>1: #İKİ ŞIK İŞARETLEME DURUMU iki_cevap+=1 continue elif cevap[0]<800:#", "matplotlib.pyplot as plt import numpy as np from imutils import", "#print(areas[0][0]) if areas[0][0]!=1 and areas[0][1]+areas[1][1]>2300000: kagit=imutils.rotate(kagit,angle=180) print(\"Kağıdı ters koymuşsunuz,çevrildi\") ret=True", "if cv2.contourArea(approx)>10050 and len(approx)==4: a+=1 M=cv2.moments(approx) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) #areas.append([a,cv2.contourArea(approx)]) #cv2.putText(resim,\"{}\".format(a),(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=4,color=(255,0,0),thickness=3)", "return 0 def tekrar_bul(array,koordinat): for c in array: if koordinat==c[0]", "import imutils cevap_anahtar={0:2,1:1,2:2,3:3,4:1,5:4,6:4,7:3,8:1,9:1,10:0,11:0,12:2,13:1,14:2,15:3,16:4,17:4,18:4,19:3,20:2,21:1,22:0,23:0,24:0,25:4,26:2,27:3,28:4,29:4,30:4,31:3,32:2,33:1,34:0,35:0,36:1,37:2,38:3,39:4} #, alfabe={0:'A',1:'B',2:'C',3:'Ç',4:'D',5:'E',6:'F',7:'G',8:'Ğ',9:'H',10:'I',11:'İ',12:'J',13:'K',14:'L',15:'M',16:'N',17:'O',18:'Ö',19:'P',20:'Q',21:'R',22:'S',23:'Ş',24:'T',25:'U',26:'Ü',27:'V',28:'W',29:'Y',30:'Z',31:'X'} def cevap_islemleri(isim,coords): a=0 thresh=cv2.threshold(isim,179,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0]", "if area<1300 and area>300 and ar>=0.9 and ar<=1.1: box=cv2.minAreaRect(approx) box=cv2.boxPoints(box)", "toplam_beyaz=cv2.countNonZero(maske) if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) if cevap[0]>500:", "isim_bos_gri) #contour_cizdir(isim_bos,coord,\"isim_bos\") #cevap_islemleri(cevap_bos_gri,coord) ############################################## resim=cv2.imread(dolu_kagit) resim_gri=cv2.cvtColor(resim,cv2.COLOR_BGR2GRAY) warp2,warp2_gri=kagit_bul(resim,resim_gri) bolgeler2,areas2=bolge_bul(warp2,warp2_gri) ret,warp2=ters_bul(warp2,areas2) '''", "approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y, w, h) = cv2.boundingRect(approx) ar =", "ar>=0.9 and ar<=1.1: box=cv2.minAreaRect(approx) box=cv2.boxPoints(box) box=np.array(box,dtype=np.int) if cv2.contourArea(box)>150: coords.append(approx) cv2.drawContours(resim,[box],0,(0,0,255),thickness=3)", "in enumerate(np.arange(0,len(coords),5)): cevap=None cnt=contours.sort_contours(coords[i:i+5])[0] toplam_beyaz=None say=0 for (j,c) in enumerate(cnt):", "thr6=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) #thr6=cv2.threshold(isim_gri,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1] ar_value=200 #if karmasiklik==1: # ar_value=800 cont=cv2.findContours(thr6,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE) x_coords=[(0,0)] sayac=0", "KAPLAMASIN ''' col1_gri=cv2.cvtColor(col1,cv2.COLOR_BGR2GRAY) col2_gri=cv2.cvtColor(col2,cv2.COLOR_BGR2GRAY) col3_gri=cv2.cvtColor(col3,cv2.COLOR_BGR2GRAY) col4_gri=cv2.cvtColor(col4,cv2.COLOR_BGR2GRAY) return col1_gri,col2_gri,col3_gri,col4_gri def cevap_contour(col1,col2,col3,col4):", "bolgeler,areas=bolge_bul(kagit,kagit_gri) ''' FIND SCHOOL NUMBER PART ''' 
ogrno_bos=four_point_transform(kagit,bolgeler['ogrno']) ogrno_bos_gri=four_point_transform(kagit_gri,bolgeler['ogrno']) ogrno_coord,ogrno_thresh=contour_bul(ogrno_bos,ogrno_bos_gri)", "PART. ''' isim_bos=four_point_transform(kagit,bolgeler['isim']) isim_bos_gri=cv2.cvtColor(isim_bos,cv2.COLOR_BGR2GRAY) coord_isim, thres=contour_bul(isim_bos, isim_bos_gri) #contour_cizdir(isim_bos,coord,\"isim_bos\") #cevap_islemleri(cevap_bos_gri,coord) ##############################################", "and areas[0][1]+areas[1][1]>2300000: kagit=imutils.rotate(kagit,angle=180) print(\"Kağıdı ters koymuşsunuz,çevrildi\") ret=True return ret,kagit else:", "yanit=[] #cevap=cv2.cvtColor(cevap,cv2.COLOR_BGR2GRAY) thresh=cv2.threshold(cevap,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),5)): cevap=None cnt=contours.sort_contours(coords[i:i+5])[0]", "in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray') #plt.show() sayac+=1 toplam_beyaz=cv2.countNonZero(maske) if", "cv2.boundingRect(approx) ar = w / float(h) if cv2.contourArea(c)>30 and ar>=0.9", "in contour: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y, w, h) = cv2.boundingRect(approx)", "isim_str=isim_islemleri(isim_dolu_gri,coord_isim,thresh_dolu) print(isim_str) sinav_dolu=four_point_transform(warp2,bolgeler2['sinav_turu']) sinav_dolu_gri=cv2.cvtColor(sinav_dolu,cv2.COLOR_BGR2GRAY) sinav_turu=sinav_islemleri(sinav_dolu,sinav_dolu_gri,sinav_coord) print(\"Sınav Türü: \",sinav_turu) ogrno_dolu=four_point_transform(warp2,bolgeler2['ogrno']) ogrno_dolu_gri=cv2.cvtColor(ogrno_dolu,cv2.COLOR_BGR2GRAY)", "q_no=0 yanlıs=0 if col_no==1: pass elif col_no==2: q_no=30 elif col_no==3:", "in enumerate(np.arange(0,len(coords),10)): cevap=None cnt=contours.sort_contours(coords[i:i+10],method=\"left-to-right\")[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8)", "''' sorugrup_bos=four_point_transform(kagit,bolgeler['soru_grubu']) sorugrup_bos_gri=four_point_transform(kagit_gri,bolgeler['soru_grubu']) sorugrup_coord,sorugrup_thresh=contour_bul(sorugrup_bos,sorugrup_bos_gri,1) coors=soru_grup_contour_bul(sorugrup_bos,sorugrup_bos_gri) soru_cont,soru_thr=contour_bul(sorugrup_bos,sorugrup_bos_gri,1) ############################### ogretim_bos=four_point_transform(kagit,bolgeler['ogretim_onay']) ogretim_bos_gri=four_point_transform(kagit_gri,bolgeler['ogretim_onay']) ogret_cont,ogret_thr=contour_bul(ogretim_bos,ogretim_bos_gri,1)", "alfabe={0:'A',1:'B',2:'C',3:'Ç',4:'D',5:'E',6:'F',7:'G',8:'Ğ',9:'H',10:'I',11:'İ',12:'J',13:'K',14:'L',15:'M',16:'N',17:'O',18:'Ö',19:'P',20:'Q',21:'R',22:'S',23:'Ş',24:'T',25:'U',26:'Ü',27:'V',28:'W',29:'Y',30:'Z',31:'X'} def cevap_islemleri(isim,coords): a=0 thresh=cv2.threshold(isim,179,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),20)):", "for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) a+=1 toplam_beyaz=cv2.countNonZero(maske) if", "return \"Tespit edilemedi\" #################################################################### def main_starter(bos_kagit,dolu_kagit): 
image=cv2.imread(bos_kagit) gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) kagit,kagit_gri=kagit_bul(image,gray) bolgeler,areas=bolge_bul(kagit,kagit_gri)", "or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) def cevap_contour_bul(isim,isim_gri): coord=[] thresholded=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) contour=cv2.findContours(thresholded,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE) x_coords=[(0,0)] sayac=0", "abs(koordinat-c[0])<15: return True #Tekrar var else: pass return False def", "in enumerate(np.arange(0,len(coords),32)): cevap=None cnt=contours.sort_contours(coords[i:i+32],method=\"top-to-bottom\")[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8)", "c in contour: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y, w, h) =", "cv2.boundingRect(approx) ar = w / float(h) if area<1300 and area>300", "#plt.show() toplam_beyaz=cv2.countNonZero(maske) if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) if", "or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) if sayac==5: break print(cevap) if cevap[0]>500: return", "contour=cv2.findContours(thresholded,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE) x_coords=[(0,0)] sayac=0 contour=imutils.grab_contours(contour) contour=contours.sort_contours(contour,method=\"top-to-bottom\")[0] for c in contour: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True)", "y=int(M['m01']/M['m00']) res=tekrar_bul(x_coords,x) if res is False and abs(x_coords[-1][1]-y)<35: coord.append(approx) x_coords.append((x,y))", "is False and abs(x_coords[-1][1]-y)<35: coord.append(approx) x_coords.append((x,y)) sayac+=1 #cv2.drawContours(isim,[box],0,(255,0,0),thickness=3) #cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2) elif", "pass return False def contour_bul(isim,isim_gri,karmasiklik=0): coord=[] thr6=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) #thr6=cv2.threshold(isim_gri,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1] ar_value=200 #if", "yanit=[] ad_str=\"\" coords=contours.sort_contours(coords,method=\"left-to-right\")[0] for (s,i) in enumerate(np.arange(0,len(coords),32)): cevap=None cnt=contours.sort_contours(coords[i:i+32],method=\"top-to-bottom\")[0] toplam_beyaz=None", "AND MARKED MORE THAN 1 ''' return(dogru,yanlıs,bos,iki_cevap) def isim_islemleri(isim,coords,thresh): a=0", "in enumerate(np.arange(0,len(coords),20)): cevap=None cnt=contours.sort_contours(coords[i:i+30])[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8)", "enumerate(cnt): if len(cevap_anahtar)<=q_no+s: return (dogru,yanlıs,bos,iki_cevap) maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray') #plt.show()", "cevap=(toplam_beyaz,j,s) return yanit[cevap[1]] def sorugrup_islemleri(soru,soru_gri,coords): yanit=[\"A\",\"B\",\"C\",\"D\",\"E\"] sayac=0 thresh=cv2.threshold(soru_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for", "in cont: approx=cv2.approxPolyDP(c,0.009*cv2.arcLength(c,True),True) if cv2.contourArea(approx)>10050 and len(approx)==4: a+=1 M=cv2.moments(approx) x=int(M['m10']/M['m00'])", "sinav_dolu=four_point_transform(warp2,bolgeler2['sinav_turu']) sinav_dolu_gri=cv2.cvtColor(sinav_dolu,cv2.COLOR_BGR2GRAY) 
sinav_turu=sinav_islemleri(sinav_dolu,sinav_dolu_gri,sinav_coord) print(\"Sınav Türü: \",sinav_turu) ogrno_dolu=four_point_transform(warp2,bolgeler2['ogrno']) ogrno_dolu_gri=cv2.cvtColor(ogrno_dolu,cv2.COLOR_BGR2GRAY) ogrno_islemleri(ogrno_dolu,ogrno_dolu_gri,ogrno_coord) cevap_dolu=four_point_transform(warp2,bolgeler2['cevaplar'])", "bolgeler={'isim':temp[0],'ogrno':temp[1],'sinav_turu':temp[2],'soru_grubu':temp[3],'ogretim_onay':temp[4],'cevaplar':temp[5]} areas=sorted(areas,key=lambda x:x[1],reverse=True) return bolgeler,areas def cevap_islemleri(cevap,coords,col_no=1): iki_cevap=0 bos=0 dogru=0", "#thr6=cv2.threshold(isim_gri,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1] ar_value=200 #if karmasiklik==1: # ar_value=800 cont=cv2.findContours(thr6,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE) x_coords=[(0,0)] sayac=0 cont=imutils.grab_contours(cont)", "w / float(h) if area<1300 and area>300 and ar>=0.9 and", "cv2.contourArea(box)>150: coords.append(approx) cv2.drawContours(resim,[box],0,(0,0,255),thickness=3) if len(coords)==5: return coords else: return 0", "ret,warp2=ters_bul(warp2,areas2) ''' TERS İSE TEKRAR BOLGELERİ BUL ''' if ret==True:", "koymuşsunuz,çevrildi\") ret=True return ret,kagit else: return ret,kagit def kagit_bul(image,gray): thr=cv2.threshold(gray,150,255,cv2.THRESH_BINARY)[1]", "sorugrup_islemleri(soru,soru_gri,coords): yanit=[\"A\",\"B\",\"C\",\"D\",\"E\"] sayac=0 thresh=cv2.threshold(soru_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),10)): cevap=None", "#cv2.imshow(\"ogretc\",ogretim_dolu) #ogretim_onayı=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogretimonay_coord) sorugrup_dolu=four_point_transform(warp2,bolgeler2['soru_grubu']) sorugrup_dolu_gri=cv2.cvtColor(sorugrup_dolu,cv2.COLOR_BGR2GRAY) soru_tur=sorugrup_islemleri(sorugrup_dolu,sorugrup_dolu_gri,soru_cont) print(\"Soru Grubu\",soru_tur) thresh_dolu=cv2.threshold(isim_dolu_gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1] isim_str=isim_islemleri(isim_dolu_gri,coord_isim,thresh_dolu) print(isim_str)", "sayac+=1 x_coords.append((x,y)) #cv2.drawContours(isim,[box],0,(255,0,0),thickness=3) #cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2) else: continue return coord,thr6 def contour_cizdir(resim,cont,isim=\"default\"):", "1 ''' return(dogru,yanlıs,bos,iki_cevap) def isim_islemleri(isim,coords,thresh): a=0 yanit=[] ad_str=\"\" coords=contours.sort_contours(coords,method=\"left-to-right\")[0] for", "plt import numpy as np from imutils import contours from", "ret=False #print(areas[0][0]) if areas[0][0]!=1 and areas[0][1]+areas[1][1]>2300000: kagit=imutils.rotate(kagit,angle=180) print(\"Kağıdı ters koymuşsunuz,çevrildi\")", "isim_dolu_gri=cv2.cvtColor(isim_dolu,cv2.COLOR_BGR2GRAY) contour_cizdir(isim_dolu,coord_isim,\"dolu_kagit_contourlu\") ''' OGRETİM ONAY DOLU KAGIT ''' ogretim_dolu=four_point_transform(warp2,bolgeler2['ogretim_onay']) ogretim_dolu_gri=cv2.cvtColor(ogretim_dolu,cv2.COLOR_BGR2GRAY)", "area=cv2.contourArea(approx) (x, y, w, h) = cv2.boundingRect(approx) ar = w", "basarim=cevap_islemleri(col1_gri_dolu,col1_coord,1) elif len(cevap_anahtar)<=60: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim=(basarim1[0]+basarim2[0],basarim1[1]+basarim2[1],basarim1[2]+basarim2[2],basarim1[3]+basarim2[3]) #print(basarim) elif len(cevap_anahtar)<=90: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1)", "yanit=[\"A\",\"B\",\"C\",\"D\",\"E\"] 
sayac=0 thresh=cv2.threshold(soru_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),10)): cevap=None cnt=contours.sort_contours(coords[i:i+10],method=\"left-to-right\")[0]", "TEKRAR BOLGELERİ BUL ''' if ret==True: warp2_gri=cv2.cvtColor(warp2,cv2.COLOR_BGR2GRAY) bolgeler2,areas2=bolge_bul(warp2,warp2_gri) else: pass", "#print(\"tespit edilemedi\") return \"Tespit edilemedi\" #################################################################### def main_starter(bos_kagit,dolu_kagit): image=cv2.imread(bos_kagit) gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)", "array: if koordinat==c[0] or abs(koordinat-c[0])<15: return True #Tekrar var else:", "cnt=contours.sort_contours(coords[i:i+5])[0] toplam_beyaz=None say=0 for (j,c) in enumerate(cnt): if len(cevap_anahtar)<=q_no+s: return", "contour=sorted(contour,key=cv2.contourArea,reverse=True) for c in contour: approx=cv2.approxPolyDP(c,0.02*cv2.arcLength(c,True),True) if len(approx)==4: #cv2.drawContours(image,[approx],0,(0,255,0),thickness=3) break", "col3_gri=cv2.cvtColor(col3,cv2.COLOR_BGR2GRAY) col4_gri=cv2.cvtColor(col4,cv2.COLOR_BGR2GRAY) return col1_gri,col2_gri,col3_gri,col4_gri def cevap_contour(col1,col2,col3,col4): col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4) col1_coord=cevap_contour_bul(col1,col1_gri) col2_coord=cevap_contour_bul(col2,col1_gri) col3_coord=cevap_contour_bul(col3,col1_gri)", "numpy as np from imutils import contours from imutils.perspective import", "image=cv2.imread(bos_kagit) gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) kagit,kagit_gri=kagit_bul(image,gray) bolgeler,areas=bolge_bul(kagit,kagit_gri) ''' FIND SCHOOL NUMBER PART '''", "= w / float(h) if cv2.contourArea(c)>30 and ar>=0.9 and ar<=1.1:", "if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,q_no+s) if toplam_beyaz>800: say+=1", "continue return coord,thr6 def contour_cizdir(resim,cont,isim=\"default\"): for c in cont: cv2.drawContours(resim,[c],0,(0,255,0),thickness=4)", "len(cevap_anahtar)<=60: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim=(basarim1[0]+basarim2[0],basarim1[1]+basarim2[1],basarim1[2]+basarim2[2],basarim1[3]+basarim2[3]) #print(basarim) elif len(cevap_anahtar)<=90: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3)", "len(cevap_anahtar)<=90: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3) basarim=basarim1+basarim2+basarim3 elif len(cevap_anahtar)<=120: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3)", "\"Tespit edilemedi\" #################################################################### def main_starter(bos_kagit,dolu_kagit): image=cv2.imread(bos_kagit) gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) kagit,kagit_gri=kagit_bul(image,gray) bolgeler,areas=bolge_bul(kagit,kagit_gri) '''", "def cevap_islemleri(isim,coords): a=0 thresh=cv2.threshold(isim,179,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),20)): cevap=None", "bolgeler={} thr2=cv2.adaptiveThreshold(gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) areas=[] 
cont=cv2.findContours(thr2,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont) temp=[] cont=contours.sort_contours(cont,\"top-to-bottom\")[0] a=0 for c", "else: if cevap_anahtar[q_no+s]== cevap[1]: #print(cevap_anahtar[q_no+s],cevap[1]) dogru+=1 else: yanlıs+=1 ''' NUMBER", "for (s,i) in enumerate(np.arange(0,len(coords),20)): cevap=None cnt=contours.sort_contours(coords[i:i+30])[0] toplam_beyaz=None for (j,c) in", "türü\",sinav_bos_gri) ''' OTHER PARTS THAT ON PAPER ''' sorugrup_bos=four_point_transform(kagit,bolgeler['soru_grubu']) sorugrup_bos_gri=four_point_transform(kagit_gri,bolgeler['soru_grubu'])", "approx=cv2.approxPolyDP(c,0.02*cv2.arcLength(c,True),True) if len(approx)==4: #cv2.drawContours(image,[approx],0,(0,255,0),thickness=3) break warp=four_point_transform(image,approx.reshape(4,2)) warp_gri=four_point_transform(gray,approx.reshape(4,2)) return warp,warp_gri def", "col_no==3: q_no=60 elif col_no==4: q_no=90 yanit=[] #cevap=cv2.cvtColor(cevap,cv2.COLOR_BGR2GRAY) thresh=cv2.threshold(cevap,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for", "cevap=(toplam_beyaz,j,s) # print(\"cevap\",cevap) if cevap[0]>500: yanit.append(alfabe[cevap[1]]) elif cevap[0]<600: yanit.append(\" \")", "cnt=contours.sort_contours(coords[i:i+10],method=\"top-to-bottom\")[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray')", "ar<=1.1: box=cv2.minAreaRect(approx) box=cv2.boxPoints(box) box=np.array(box,dtype=np.int) if cv2.contourArea(box)>150: coords.append(approx) cv2.drawContours(resim,[box],0,(0,0,255),thickness=3) if len(coords)==5:", "#cevap_islemleri(col2_gri,coord_cevap) ''' EXAM TYPE FIND PART ''' sinav_bos=four_point_transform(kagit,bolgeler['sinav_turu']) sinav_bos_gri=four_point_transform(kagit_gri,bolgeler['sinav_turu']) sinav_coord,sinav_thresh=contour_bul(sinav_bos,sinav_bos_gri)", "basarim4=cevap_islemleri(col4_gri_dolu,col4_coord,4) basarim=basarim1+basarim2+basarim3+basarim4 print(f\"Doğru cevap sayısı:{basarim[0]}\\nYanlış cevap sayısı:{basarim[1]}\\nBoş sayısı:{basarim[2]}\\nİki cevap işaret:{basarim[3]}\")", "= cv2.boundingRect(approx) ar = w / float(h) if area<1500 and", "None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) # print(\"cevap\",cevap) if cevap[0]>500: yanit.append(alfabe[cevap[1]]) elif", "isim_bos_gri=cv2.cvtColor(isim_bos,cv2.COLOR_BGR2GRAY) coord_isim, thres=contour_bul(isim_bos, isim_bos_gri) #contour_cizdir(isim_bos,coord,\"isim_bos\") #cevap_islemleri(cevap_bos_gri,coord) ############################################## resim=cv2.imread(dolu_kagit) resim_gri=cv2.cvtColor(resim,cv2.COLOR_BGR2GRAY) warp2,warp2_gri=kagit_bul(resim,resim_gri)", "break warp=four_point_transform(image,approx.reshape(4,2)) warp_gri=four_point_transform(gray,approx.reshape(4,2)) return warp,warp_gri def soru_grup_contour_bul(resim,gri): thr2=cv2.threshold(gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1] can=cv2.Canny(thr2,50,100) can=cv2.dilate(can,None,iterations=3)", "True #Tekrar var else: pass return False def contour_bul(isim,isim_gri,karmasiklik=0): coord=[]", "sorugrup_coord,sorugrup_thresh=contour_bul(sorugrup_bos,sorugrup_bos_gri,1) coors=soru_grup_contour_bul(sorugrup_bos,sorugrup_bos_gri) soru_cont,soru_thr=contour_bul(sorugrup_bos,sorugrup_bos_gri,1) ############################### 
ogretim_bos=four_point_transform(kagit,bolgeler['ogretim_onay']) ogretim_bos_gri=four_point_transform(kagit_gri,bolgeler['ogretim_onay']) ogret_cont,ogret_thr=contour_bul(ogretim_bos,ogretim_bos_gri,1) ''' NAME FIND", "cevap sayısı:{basarim[1]}\\nBoş sayısı:{basarim[2]}\\nİki cevap işaret:{basarim[3]}\") cv2.waitKey() cv2.destroyAllWindows() if __name__ ==", "col4=four_point_transform(cevap,pts4) return col1,col2,col3,col4 def cevap_gri(col1,col2,col3,col4): ''' KOLONLARI GRİ YAPMAK İÇİN,MAİNDE", "cevap=None cnt=contours.sort_contours(coords[i:i+5])[0] toplam_beyaz=None say=0 for (j,c) in enumerate(cnt): if len(cevap_anahtar)<=q_no+s:", "is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) def cevap_contour_bul(isim,isim_gri): coord=[] thresholded=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) contour=cv2.findContours(thresholded,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)", "if len(coords)==5: return coords else: return 0 def tekrar_bul(array,koordinat): for", "print(\"Öğretim Onayı:\",ogret_onay) #cv2.drawContours(ogretim_dolu,ogret_cont,-1,(255,0,0),thickness=3) #cv2.imshow(\"ogretc\",ogretim_dolu) #ogretim_onayı=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogretimonay_coord) sorugrup_dolu=four_point_transform(warp2,bolgeler2['soru_grubu']) sorugrup_dolu_gri=cv2.cvtColor(sorugrup_dolu,cv2.COLOR_BGR2GRAY) soru_tur=sorugrup_islemleri(sorugrup_dolu,sorugrup_dolu_gri,soru_cont) print(\"Soru Grubu\",soru_tur)", "= cv2.boundingRect(approx) ar = w / float(h) if cv2.contourArea(c)>30 and", "areas[0][0]!=1 and areas[0][1]+areas[1][1]>2300000: kagit=imutils.rotate(kagit,angle=180) print(\"Kağıdı ters koymuşsunuz,çevrildi\") ret=True return ret,kagit", "a+=1 toplam_beyaz=cv2.countNonZero(maske) if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) def", "EXAM TYPE FIND PART ''' sinav_bos=four_point_transform(kagit,bolgeler['sinav_turu']) sinav_bos_gri=four_point_transform(kagit_gri,bolgeler['sinav_turu']) sinav_coord,sinav_thresh=contour_bul(sinav_bos,sinav_bos_gri) sinav_islemleri(sinav_bos,sinav_bos_gri,sinav_coord) #cv2.imshow(\"sınav", "or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) # print(\"cevap\",cevap) if cevap[0]>500: yanit.append(alfabe[cevap[1]]) elif cevap[0]<600:", "return ret,kagit else: return ret,kagit def kagit_bul(image,gray): thr=cv2.threshold(gray,150,255,cv2.THRESH_BINARY)[1] contour=cv2.findContours(thr,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) contour=imutils.grab_contours(contour)", "thr2=cv2.threshold(gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1] can=cv2.Canny(thr2,50,100) can=cv2.dilate(can,None,iterations=3) coords=[] cont=cv2.findContours(can,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont) for c in cont:", "toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) #plt.imshow(maske,cmap='gray') #plt.show()", "ŞIK İŞARETLEME DURUMU iki_cevap+=1 continue elif cevap[0]<800:# BOŞ BIRAKMA DURUMU", "ad_str=\"\" coords=contours.sort_contours(coords,method=\"left-to-right\")[0] for (s,i) in enumerate(np.arange(0,len(coords),32)): cevap=None cnt=contours.sort_contours(coords[i:i+32],method=\"top-to-bottom\")[0] toplam_beyaz=None for", "#cv2.drawContours(image,[approx],0,(0,255,0),thickness=3) break warp=four_point_transform(image,approx.reshape(4,2)) warp_gri=four_point_transform(gray,approx.reshape(4,2)) return warp,warp_gri def 
soru_grup_contour_bul(resim,gri): thr2=cv2.threshold(gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1] can=cv2.Canny(thr2,50,100)", "YER KAPLAMASIN ''' col1_gri=cv2.cvtColor(col1,cv2.COLOR_BGR2GRAY) col2_gri=cv2.cvtColor(col2,cv2.COLOR_BGR2GRAY) col3_gri=cv2.cvtColor(col3,cv2.COLOR_BGR2GRAY) col4_gri=cv2.cvtColor(col4,cv2.COLOR_BGR2GRAY) return col1_gri,col2_gri,col3_gri,col4_gri def", "ogretim_bos=four_point_transform(kagit,bolgeler['ogretim_onay']) ogretim_bos_gri=four_point_transform(kagit_gri,bolgeler['ogretim_onay']) ogret_cont,ogret_thr=contour_bul(ogretim_bos,ogretim_bos_gri,1) ''' NAME FIND PART. ''' isim_bos=four_point_transform(kagit,bolgeler['isim']) isim_bos_gri=cv2.cvtColor(isim_bos,cv2.COLOR_BGR2GRAY)", "#cevap=cv2.cvtColor(cevap,cv2.COLOR_BGR2GRAY) thresh=cv2.threshold(cevap,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),5)): cevap=None cnt=contours.sort_contours(coords[i:i+5])[0] toplam_beyaz=None", "(s,i) in enumerate(np.arange(0,len(coords),10)): cevap=None cnt=contours.sort_contours(coords[i:i+10],method=\"left-to-right\")[0] toplam_beyaz=None for (j,c) in enumerate(cnt):", "for c in contour: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y, w, h)", "def sinav_islemleri(sinav,sinav_gri,coords): yanit=[\"QUİZ\",\"ARA\",\"FİNAL\",\"BÜTÜNLEME\"] thresh=cv2.threshold(sinav_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),10)): cevap=None", "thresh=cv2.threshold(soru_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),10)): cevap=None cnt=contours.sort_contours(coords[i:i+10],method=\"left-to-right\")[0] toplam_beyaz=None for", "return(dogru,yanlıs,bos,iki_cevap) def isim_islemleri(isim,coords,thresh): a=0 yanit=[] ad_str=\"\" coords=contours.sort_contours(coords,method=\"left-to-right\")[0] for (s,i) in", "basarim=(basarim1[0]+basarim2[0],basarim1[1]+basarim2[1],basarim1[2]+basarim2[2],basarim1[3]+basarim2[3]) #print(basarim) elif len(cevap_anahtar)<=90: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3) basarim=basarim1+basarim2+basarim3 elif len(cevap_anahtar)<=120:", "four_point_transform,order_points import imutils cevap_anahtar={0:2,1:1,2:2,3:3,4:1,5:4,6:4,7:3,8:1,9:1,10:0,11:0,12:2,13:1,14:2,15:3,16:4,17:4,18:4,19:3,20:2,21:1,22:0,23:0,24:0,25:4,26:2,27:3,28:4,29:4,30:4,31:3,32:2,33:1,34:0,35:0,36:1,37:2,38:3,39:4} #, alfabe={0:'A',1:'B',2:'C',3:'Ç',4:'D',5:'E',6:'F',7:'G',8:'Ğ',9:'H',10:'I',11:'İ',12:'J',13:'K',14:'L',15:'M',16:'N',17:'O',18:'Ö',19:'P',20:'Q',21:'R',22:'S',23:'Ş',24:'T',25:'U',26:'Ü',27:'V',28:'W',29:'Y',30:'Z',31:'X'} def cevap_islemleri(isim,coords): a=0 thresh=cv2.threshold(isim,179,255,cv2.THRESH_BINARY_INV)[1]", "#if karmasiklik==1: # ar_value=800 cont=cv2.findContours(thr6,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE) x_coords=[(0,0)] sayac=0 cont=imutils.grab_contours(cont) cont=contours.sort_contours(cont,method=\"top-to-bottom\")[0] for", "/ float(h) if cv2.contourArea(c)>30 and ar>=0.9 and ar<=1.1: box=cv2.minAreaRect(approx) box=cv2.boxPoints(box)", "len(temp)>=5: bolgeler={'isim':temp[0],'ogrno':temp[1],'sinav_turu':temp[2],'soru_grubu':temp[3],'ogretim_onay':temp[4],'cevaplar':temp[5]} 
areas=sorted(areas,key=lambda x:x[1],reverse=True) return bolgeler,areas def cevap_islemleri(cevap,coords,col_no=1): iki_cevap=0 bos=0", "can=cv2.dilate(can,None,iterations=3) coords=[] cont=cv2.findContours(can,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont) for c in cont: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx)", "contour=imutils.grab_contours(contour) contour=contours.sort_contours(contour,method=\"top-to-bottom\")[0] for c in contour: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y,", "if koordinat==c[0] or abs(koordinat-c[0])<15: return True #Tekrar var else: pass", "sinav_bos_gri=four_point_transform(kagit_gri,bolgeler['sinav_turu']) sinav_coord,sinav_thresh=contour_bul(sinav_bos,sinav_bos_gri) sinav_islemleri(sinav_bos,sinav_bos_gri,sinav_coord) #cv2.imshow(\"sınav türü\",sinav_bos_gri) ''' OTHER PARTS THAT ON", "numarası\") #v2.imshow(\"ogrno\",imutils.resize(ogrno_bos,height=400)) ''' DIVIDE ANSWER PART INTO 4 SLICES AND", "M=cv2.moments(approx) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) #areas.append([a,cv2.contourArea(approx)]) #cv2.putText(resim,\"{}\".format(a),(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=4,color=(255,0,0),thickness=3) temp.append(approx.reshape(4,2)) areas.append([a,cv2.contourArea(approx)]) #cv2.drawContours(resim,[approx],0,(255,0,0),thickness=3) #cv2.imshow(\"resim_olge\",imutils.resize(resim,height=650)) if", "can=cv2.Canny(thr2,50,100) can=cv2.dilate(can,None,iterations=3) coords=[] cont=cv2.findContours(can,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont) for c in cont: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True)", "def soru_grup_contour_bul(resim,gri): thr2=cv2.threshold(gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1] can=cv2.Canny(thr2,50,100) can=cv2.dilate(can,None,iterations=3) coords=[] cont=cv2.findContours(can,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont) for c", "cont=cv2.findContours(thr2,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont) temp=[] cont=contours.sort_contours(cont,\"top-to-bottom\")[0] a=0 for c in cont: approx=cv2.approxPolyDP(c,0.009*cv2.arcLength(c,True),True)", "contour sayısı: {len(cont)}\") def bolge_bul(resim,gri): bolgeler={} thr2=cv2.adaptiveThreshold(gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) areas=[] cont=cv2.findContours(thr2,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont)", "basarim=basarim1+basarim2+basarim3+basarim4 print(f\"Doğru cevap sayısı:{basarim[0]}\\nYanlış cevap sayısı:{basarim[1]}\\nBoş sayısı:{basarim[2]}\\nİki cevap işaret:{basarim[3]}\") cv2.waitKey()", "and len(approx)==4: a+=1 M=cv2.moments(approx) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) #areas.append([a,cv2.contourArea(approx)]) #cv2.putText(resim,\"{}\".format(a),(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=4,color=(255,0,0),thickness=3) temp.append(approx.reshape(4,2)) areas.append([a,cv2.contourArea(approx)])", "continue elif cevap[0]<800:# BOŞ BIRAKMA DURUMU bos+=1 continue else: if", "None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,q_no+s) if toplam_beyaz>800: say+=1 if say>1: #İKİ", "print(f\"Doğru cevap sayısı:{basarim[0]}\\nYanlış cevap sayısı:{basarim[1]}\\nBoş sayısı:{basarim[2]}\\nİki cevap işaret:{basarim[3]}\") cv2.waitKey() cv2.destroyAllWindows()", "kagit,kagit_gri=kagit_bul(image,gray) bolgeler,areas=bolge_bul(kagit,kagit_gri) ''' FIND 
SCHOOL NUMBER PART ''' ogrno_bos=four_point_transform(kagit,bolgeler['ogrno']) ogrno_bos_gri=four_point_transform(kagit_gri,bolgeler['ogrno'])", "import cv2 import matplotlib.pyplot as plt import numpy as np", "q_no=60 elif col_no==4: q_no=90 yanit=[] #cevap=cv2.cvtColor(cevap,cv2.COLOR_BGR2GRAY) thresh=cv2.threshold(cevap,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i)", "sorugrup_bos=four_point_transform(kagit,bolgeler['soru_grubu']) sorugrup_bos_gri=four_point_transform(kagit_gri,bolgeler['soru_grubu']) sorugrup_coord,sorugrup_thresh=contour_bul(sorugrup_bos,sorugrup_bos_gri,1) coors=soru_grup_contour_bul(sorugrup_bos,sorugrup_bos_gri) soru_cont,soru_thr=contour_bul(sorugrup_bos,sorugrup_bos_gri,1) ############################### ogretim_bos=four_point_transform(kagit,bolgeler['ogretim_onay']) ogretim_bos_gri=four_point_transform(kagit_gri,bolgeler['ogretim_onay']) ogret_cont,ogret_thr=contour_bul(ogretim_bos,ogretim_bos_gri,1) '''", "bolge_bul(resim,gri): bolgeler={} thr2=cv2.adaptiveThreshold(gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) areas=[] cont=cv2.findContours(thr2,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont) temp=[] cont=contours.sort_contours(cont,\"top-to-bottom\")[0] a=0 for", "cevap=None cnt=contours.sort_contours(coords[i:i+30])[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske)", "col_no==2: q_no=30 elif col_no==3: q_no=60 elif col_no==4: q_no=90 yanit=[] #cevap=cv2.cvtColor(cevap,cv2.COLOR_BGR2GRAY)", "#, alfabe={0:'A',1:'B',2:'C',3:'Ç',4:'D',5:'E',6:'F',7:'G',8:'Ğ',9:'H',10:'I',11:'İ',12:'J',13:'K',14:'L',15:'M',16:'N',17:'O',18:'Ö',19:'P',20:'Q',21:'R',22:'S',23:'Ş',24:'T',25:'U',26:'Ü',27:'V',28:'W',29:'Y',30:'Z',31:'X'} def cevap_islemleri(isim,coords): a=0 thresh=cv2.threshold(isim,179,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in", "cevap_anahtar={0:2,1:1,2:2,3:3,4:1,5:4,6:4,7:3,8:1,9:1,10:0,11:0,12:2,13:1,14:2,15:3,16:4,17:4,18:4,19:3,20:2,21:1,22:0,23:0,24:0,25:4,26:2,27:3,28:4,29:4,30:4,31:3,32:2,33:1,34:0,35:0,36:1,37:2,38:3,39:4} #, alfabe={0:'A',1:'B',2:'C',3:'Ç',4:'D',5:'E',6:'F',7:'G',8:'Ğ',9:'H',10:'I',11:'İ',12:'J',13:'K',14:'L',15:'M',16:'N',17:'O',18:'Ö',19:'P',20:'Q',21:'R',22:'S',23:'Ş',24:'T',25:'U',26:'Ü',27:'V',28:'W',29:'Y',30:'Z',31:'X'} def cevap_islemleri(isim,coords): a=0 thresh=cv2.threshold(isim,179,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i)", "DURUMU bos+=1 continue else: if cevap_anahtar[q_no+s]== cevap[1]: #print(cevap_anahtar[q_no+s],cevap[1]) dogru+=1 else:", "cevap[0]>500: yanit+=str(cevap[1]) print(\"Okul Numarası:\",yanit) def sinav_islemleri(sinav,sinav_gri,coords): yanit=[\"QUİZ\",\"ARA\",\"FİNAL\",\"BÜTÜNLEME\"] thresh=cv2.threshold(sinav_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for", "isim_islemleri(isim,coords,thresh): a=0 yanit=[] ad_str=\"\" coords=contours.sort_contours(coords,method=\"left-to-right\")[0] for (s,i) in enumerate(np.arange(0,len(coords),32)): cevap=None", "sorugrup_dolu_gri=cv2.cvtColor(sorugrup_dolu,cv2.COLOR_BGR2GRAY) soru_tur=sorugrup_islemleri(sorugrup_dolu,sorugrup_dolu_gri,soru_cont) print(\"Soru Grubu\",soru_tur) 
thresh_dolu=cv2.threshold(isim_dolu_gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1] isim_str=isim_islemleri(isim_dolu_gri,coord_isim,thresh_dolu) print(isim_str) sinav_dolu=four_point_transform(warp2,bolgeler2['sinav_turu']) sinav_dolu_gri=cv2.cvtColor(sinav_dolu,cv2.COLOR_BGR2GRAY) sinav_turu=sinav_islemleri(sinav_dolu,sinav_dolu_gri,sinav_coord)", "PART ''' sinav_bos=four_point_transform(kagit,bolgeler['sinav_turu']) sinav_bos_gri=four_point_transform(kagit_gri,bolgeler['sinav_turu']) sinav_coord,sinav_thresh=contour_bul(sinav_bos,sinav_bos_gri) sinav_islemleri(sinav_bos,sinav_bos_gri,sinav_coord) #cv2.imshow(\"sınav türü\",sinav_bos_gri) ''' OTHER", "else: pass return False def contour_bul(isim,isim_gri,karmasiklik=0): coord=[] thr6=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) #thr6=cv2.threshold(isim_gri,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1] ar_value=200", "enumerate(np.arange(0,len(coords),32)): cevap=None cnt=contours.sort_contours(coords[i:i+32],method=\"top-to-bottom\")[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)", "# print(x,y) res=tekrar_bul(x_coords,x) if res is False and abs(x_coords[-1][1]-y)<35: coord.append(approx)", "kagit_bul(image,gray): thr=cv2.threshold(gray,150,255,cv2.THRESH_BINARY)[1] contour=cv2.findContours(thr,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) contour=imutils.grab_contours(contour) contour=sorted(contour,key=cv2.contourArea,reverse=True) for c in contour: approx=cv2.approxPolyDP(c,0.02*cv2.arcLength(c,True),True)", "if len(approx)==4: #cv2.drawContours(image,[approx],0,(0,255,0),thickness=3) break warp=four_point_transform(image,approx.reshape(4,2)) warp_gri=four_point_transform(gray,approx.reshape(4,2)) return warp,warp_gri def soru_grup_contour_bul(resim,gri):", "cevap sayısı:{basarim[0]}\\nYanlış cevap sayısı:{basarim[1]}\\nBoş sayısı:{basarim[2]}\\nİki cevap işaret:{basarim[3]}\") cv2.waitKey() cv2.destroyAllWindows() if", "#cv2.drawContours(resim,[approx],0,(255,0,0),thickness=3) #cv2.imshow(\"resim_olge\",imutils.resize(resim,height=650)) if len(temp)>=5: bolgeler={'isim':temp[0],'ogrno':temp[1],'sinav_turu':temp[2],'soru_grubu':temp[3],'ogretim_onay':temp[4],'cevaplar':temp[5]} areas=sorted(areas,key=lambda x:x[1],reverse=True) return bolgeler,areas def", "def tekrar_bul(array,koordinat): for c in array: if koordinat==c[0] or abs(koordinat-c[0])<15:", "cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) #plt.imshow(maske,cmap='gray') #plt.show() #a+=1 toplam_beyaz=cv2.countNonZero(maske) #print(toplam_beyaz,j) if cevap is", "for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray') #plt.show() sayac+=1", "ON PAPER ''' sorugrup_bos=four_point_transform(kagit,bolgeler['soru_grubu']) sorugrup_bos_gri=four_point_transform(kagit_gri,bolgeler['soru_grubu']) sorugrup_coord,sorugrup_thresh=contour_bul(sorugrup_bos,sorugrup_bos_gri,1) coors=soru_grup_contour_bul(sorugrup_bos,sorugrup_bos_gri) soru_cont,soru_thr=contour_bul(sorugrup_bos,sorugrup_bos_gri,1) ############################### ogretim_bos=four_point_transform(kagit,bolgeler['ogretim_onay'])", "warp2_gri=cv2.cvtColor(warp2,cv2.COLOR_BGR2GRAY) bolgeler2,areas2=bolge_bul(warp2,warp2_gri) else: pass 
isim_dolu=four_point_transform(warp2,bolgeler2['isim']) isim_dolu_gri=cv2.cvtColor(isim_dolu,cv2.COLOR_BGR2GRAY) contour_cizdir(isim_dolu,coord_isim,\"dolu_kagit_contourlu\") ''' OGRETİM ONAY", "return warp,warp_gri def soru_grup_contour_bul(resim,gri): thr2=cv2.threshold(gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1] can=cv2.Canny(thr2,50,100) can=cv2.dilate(can,None,iterations=3) coords=[] cont=cv2.findContours(can,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont)", "in cont: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y, w, h) = cv2.boundingRect(approx)", "FIND PART. ''' isim_bos=four_point_transform(kagit,bolgeler['isim']) isim_bos_gri=cv2.cvtColor(isim_bos,cv2.COLOR_BGR2GRAY) coord_isim, thres=contour_bul(isim_bos, isim_bos_gri) #contour_cizdir(isim_bos,coord,\"isim_bos\") #cevap_islemleri(cevap_bos_gri,coord)", "area>300 and ar>=0.9 and ar<=1.1: box=cv2.minAreaRect(approx) box=cv2.boxPoints(box) box=np.array(box,dtype=np.int) M=cv2.moments(box) x=int(M['m10']/M['m00'])", "enumerate(np.arange(0,len(coords),10)): cevap=None cnt=contours.sort_contours(coords[i:i+10],method=\"left-to-right\")[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)", "is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) if sayac==5: break print(cevap) if", "for c in cont: cv2.drawContours(resim,[c],0,(0,255,0),thickness=4) #print(f\"Bulunan contour sayısı: {len(cont)}\") def", "#plt.show() sayac+=1 toplam_beyaz=cv2.countNonZero(maske) if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s)", "if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) # print(\"cevap\",cevap) if", "bos=0 dogru=0 q_no=0 yanlıs=0 if col_no==1: pass elif col_no==2: q_no=30", "for c in contour: approx=cv2.approxPolyDP(c,0.02*cv2.arcLength(c,True),True) if len(approx)==4: #cv2.drawContours(image,[approx],0,(0,255,0),thickness=3) break warp=four_point_transform(image,approx.reshape(4,2))", "''' ogrno_bos=four_point_transform(kagit,bolgeler['ogrno']) ogrno_bos_gri=four_point_transform(kagit_gri,bolgeler['ogrno']) ogrno_coord,ogrno_thresh=contour_bul(ogrno_bos,ogrno_bos_gri) contour_cizdir(ogrno_bos_gri,ogrno_coord,\"ogrenci numarası\") #v2.imshow(\"ogrno\",imutils.resize(ogrno_bos,height=400)) ''' DIVIDE ANSWER", "a=0 thresh=cv2.threshold(isim,179,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),20)): cevap=None cnt=contours.sort_contours(coords[i:i+30])[0] toplam_beyaz=None", "cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,q_no+s) if toplam_beyaz>800: say+=1 if", "c in array: if koordinat==c[0] or abs(koordinat-c[0])<15: return True #Tekrar", "''' EXAM TYPE FIND PART ''' sinav_bos=four_point_transform(kagit,bolgeler['sinav_turu']) sinav_bos_gri=four_point_transform(kagit_gri,bolgeler['sinav_turu']) sinav_coord,sinav_thresh=contour_bul(sinav_bos,sinav_bos_gri) sinav_islemleri(sinav_bos,sinav_bos_gri,sinav_coord)", "ret,kagit else: return ret,kagit def kagit_bul(image,gray): thr=cv2.threshold(gray,150,255,cv2.THRESH_BINARY)[1] contour=cv2.findContours(thr,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) contour=imutils.grab_contours(contour) contour=sorted(contour,key=cv2.contourArea,reverse=True)", "def cevap_islemleri(cevap,coords,col_no=1): iki_cevap=0 bos=0 dogru=0 q_no=0 yanlıs=0 if col_no==1: pass", "GRİ YAPMAK İÇİN,MAİNDE YER 
KAPLAMASIN ''' col1_gri=cv2.cvtColor(col1,cv2.COLOR_BGR2GRAY) col2_gri=cv2.cvtColor(col2,cv2.COLOR_BGR2GRAY) col3_gri=cv2.cvtColor(col3,cv2.COLOR_BGR2GRAY) col4_gri=cv2.cvtColor(col4,cv2.COLOR_BGR2GRAY)", "#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2) else: continue return coord def ters_bul(kagit,areas): ret=False #print(areas[0][0]) if", "col4_coord=cevap_contour_bul(col4,col1_gri) return col1_coord,col2_coord,col3_coord,col4_coord def ogrno_islemleri(ogrno,ogrno_gri,coords): yanit=\"\" thresh=cv2.threshold(ogrno_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"left-to-right\")[0] for (s,i)", "DURUMU iki_cevap+=1 continue elif cevap[0]<800:# BOŞ BIRAKMA DURUMU bos+=1 continue", "toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) a+=1 toplam_beyaz=cv2.countNonZero(maske)", "if cv2.contourArea(c)>30 and ar>=0.9 and ar<=1.1: box=cv2.minAreaRect(approx) box=cv2.boxPoints(box) box=np.array(box,dtype=np.int) if", "four_point_transform import imutils import cv2 import matplotlib.pyplot as plt import", "col1_coord,col2_coord,col3_coord,col4_coord=cevap_contour(col1,col2,col3,col4) #contour_cizdir(col1,col1_coord) #cevap_islemleri(col2_gri,coord_cevap) ''' EXAM TYPE FIND PART ''' sinav_bos=four_point_transform(kagit,bolgeler['sinav_turu'])", "TERS İSE TEKRAR BOLGELERİ BUL ''' if ret==True: warp2_gri=cv2.cvtColor(warp2,cv2.COLOR_BGR2GRAY) bolgeler2,areas2=bolge_bul(warp2,warp2_gri)", "yanit+=str(cevap[1]) print(\"Okul Numarası:\",yanit) def sinav_islemleri(sinav,sinav_gri,coords): yanit=[\"QUİZ\",\"ARA\",\"FİNAL\",\"BÜTÜNLEME\"] thresh=cv2.threshold(sinav_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i)", "isim_bos=four_point_transform(kagit,bolgeler['isim']) isim_bos_gri=cv2.cvtColor(isim_bos,cv2.COLOR_BGR2GRAY) coord_isim, thres=contour_bul(isim_bos, isim_bos_gri) #contour_cizdir(isim_bos,coord,\"isim_bos\") #cevap_islemleri(cevap_bos_gri,coord) ############################################## resim=cv2.imread(dolu_kagit) resim_gri=cv2.cvtColor(resim,cv2.COLOR_BGR2GRAY)", "bolgeler2,areas2=bolge_bul(warp2,warp2_gri) ret,warp2=ters_bul(warp2,areas2) ''' TERS İSE TEKRAR BOLGELERİ BUL ''' if", "dogru+=1 else: yanlıs+=1 ''' NUMBER OF TRUE,FALSE,NOT MARKED AND MARKED", "''' OTHER PARTS THAT ON PAPER ''' sorugrup_bos=four_point_transform(kagit,bolgeler['soru_grubu']) sorugrup_bos_gri=four_point_transform(kagit_gri,bolgeler['soru_grubu']) sorugrup_coord,sorugrup_thresh=contour_bul(sorugrup_bos,sorugrup_bos_gri,1)", "say+=1 if say>1: #İKİ ŞIK İŞARETLEME DURUMU iki_cevap+=1 continue elif", "(s,i) in enumerate(np.arange(0,len(coords),10)): cevap=None cnt=contours.sort_contours(coords[i:i+10],method=\"top-to-bottom\")[0] toplam_beyaz=None for (j,c) in enumerate(cnt):", "cont=imutils.grab_contours(cont) cont=contours.sort_contours(cont,method=\"top-to-bottom\")[0] for c in cont: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y,", "if res is False and abs(x_coords[-1][1]-y)<35: coord.append(approx) x_coords.append((x,y)) sayac+=1 #cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)", "cont=cv2.findContours(can,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont) for c in cont: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y,", "res is False and 
abs(x_coords[-1][1]-y)<35: coord.append(approx) x_coords.append((x,y)) sayac+=1 #cv2.drawContours(isim,[box],0,(255,0,0),thickness=3) #cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)", "None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) if cevap[0]>500: yanit+=str(cevap[1]) print(\"Okul Numarası:\",yanit) def", "basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3) basarim4=cevap_islemleri(col4_gri_dolu,col4_coord,4) basarim=basarim1+basarim2+basarim3+basarim4 print(f\"Doğru cevap sayısı:{basarim[0]}\\nYanlış cevap sayısı:{basarim[1]}\\nBoş", "/ float(h) if area<1300 and area>300 and ar>=0.9 and ar<=1.1:", "a+=1 M=cv2.moments(approx) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) #areas.append([a,cv2.contourArea(approx)]) #cv2.putText(resim,\"{}\".format(a),(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=4,color=(255,0,0),thickness=3) temp.append(approx.reshape(4,2)) areas.append([a,cv2.contourArea(approx)]) #cv2.drawContours(resim,[approx],0,(255,0,0),thickness=3) #cv2.imshow(\"resim_olge\",imutils.resize(resim,height=650))", "return coord,thr6 def contour_cizdir(resim,cont,isim=\"default\"): for c in cont: cv2.drawContours(resim,[c],0,(0,255,0),thickness=4) #print(f\"Bulunan", "#cv2.drawContours(isim,[box],0,(255,0,0),thickness=3) #cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2) else: continue return coord def ters_bul(kagit,areas): ret=False #print(areas[0][0])", "#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2) else: continue return coord,thr6 def contour_cizdir(resim,cont,isim=\"default\"): for c in", "box=np.array(box,dtype=np.int) M=cv2.moments(box) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) # print(x,y) res=tekrar_bul(x_coords,x) if res is", "sayısı:{basarim[1]}\\nBoş sayısı:{basarim[2]}\\nİki cevap işaret:{basarim[3]}\") cv2.waitKey() cv2.destroyAllWindows() if __name__ == '__main__':", "coords=[] cont=cv2.findContours(can,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont) for c in cont: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x,", "THAN 1 ''' return(dogru,yanlıs,bos,iki_cevap) def isim_islemleri(isim,coords,thresh): a=0 yanit=[] ad_str=\"\" coords=contours.sort_contours(coords,method=\"left-to-right\")[0]", "''' KOLONLARI GRİ YAPMAK İÇİN,MAİNDE YER KAPLAMASIN ''' col1_gri=cv2.cvtColor(col1,cv2.COLOR_BGR2GRAY) col2_gri=cv2.cvtColor(col2,cv2.COLOR_BGR2GRAY)", "toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) if cevap[0]>500: yanit+=str(cevap[1]) print(\"Okul Numarası:\",yanit) def sinav_islemleri(sinav,sinav_gri,coords): yanit=[\"QUİZ\",\"ARA\",\"FİNAL\",\"BÜTÜNLEME\"]", "maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) #plt.imshow(maske,cmap='gray') #plt.show() #a+=1 toplam_beyaz=cv2.countNonZero(maske) #print(toplam_beyaz,j) if cevap", "contour_bul(isim,isim_gri,karmasiklik=0): coord=[] thr6=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) #thr6=cv2.threshold(isim_gri,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1] ar_value=200 #if karmasiklik==1: # ar_value=800 cont=cv2.findContours(thr6,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)", "ONE ''' cevap_bos=four_point_transform(kagit,bolgeler['cevaplar']) cevap_bos_gri=four_point_transform(kagit_gri,bolgeler['cevaplar']) col1,col2,col3,col4=cevap_kolon(cevap_bos) 
col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4) col1_coord,col2_coord,col3_coord,col4_coord=cevap_contour(col1,col2,col3,col4) #contour_cizdir(col1,col1_coord) #cevap_islemleri(col2_gri,coord_cevap) '''", "contours from imutils.perspective import four_point_transform,order_points import imutils cevap_anahtar={0:2,1:1,2:2,3:3,4:1,5:4,6:4,7:3,8:1,9:1,10:0,11:0,12:2,13:1,14:2,15:3,16:4,17:4,18:4,19:3,20:2,21:1,22:0,23:0,24:0,25:4,26:2,27:3,28:4,29:4,30:4,31:3,32:2,33:1,34:0,35:0,36:1,37:2,38:3,39:4} #, alfabe={0:'A',1:'B',2:'C',3:'Ç',4:'D',5:'E',6:'F',7:'G',8:'Ğ',9:'H',10:'I',11:'İ',12:'J',13:'K',14:'L',15:'M',16:'N',17:'O',18:'Ö',19:'P',20:'Q',21:'R',22:'S',23:'Ş',24:'T',25:'U',26:'Ü',27:'V',28:'W',29:'Y',30:'Z',31:'X'}", "yanit: ad_str+=s return ad_str def cevap_kolon(cevap): pts1=np.array([(2,50),(300,50),(2,1545),(300,1545)]) pts2=np.array([(300,50),(600,50),(302,1545),(602,1545)]) pts3=np.array([(600,50),(900,50),(602,1545),(902,1545)]) pts4=np.array([(900,50),(1200,50),(902,1545),(1202,1545)])", "ar = w / float(h) if cv2.contourArea(c)>30 and ar>=0.9 and", "col_no==1: pass elif col_no==2: q_no=30 elif col_no==3: q_no=60 elif col_no==4:", "# print(\"cevap\",cevap) if cevap[0]>500: yanit.append(alfabe[cevap[1]]) elif cevap[0]<600: yanit.append(\" \") for", "for (s,i) in enumerate(np.arange(0,len(coords),10)): cevap=None cnt=contours.sort_contours(coords[i:i+10],method=\"top-to-bottom\")[0] toplam_beyaz=None for (j,c) in", "''' NUMBER OF TRUE,FALSE,NOT MARKED AND MARKED MORE THAN 1", "c in cont: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y, w, h) =", "cv2.contourArea(approx)>10050 and len(approx)==4: a+=1 M=cv2.moments(approx) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) #areas.append([a,cv2.contourArea(approx)]) #cv2.putText(resim,\"{}\".format(a),(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=4,color=(255,0,0),thickness=3) temp.append(approx.reshape(4,2))", "if toplam_beyaz>800: say+=1 if say>1: #İKİ ŞIK İŞARETLEME DURUMU iki_cevap+=1", "in contour: approx=cv2.approxPolyDP(c,0.02*cv2.arcLength(c,True),True) if len(approx)==4: #cv2.drawContours(image,[approx],0,(0,255,0),thickness=3) break warp=four_point_transform(image,approx.reshape(4,2)) warp_gri=four_point_transform(gray,approx.reshape(4,2)) return", "''' DIVIDE ANSWER PART INTO 4 SLICES AND FIND ONE", "elif len(cevap_anahtar)<=60: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim=(basarim1[0]+basarim2[0],basarim1[1]+basarim2[1],basarim1[2]+basarim2[2],basarim1[3]+basarim2[3]) #print(basarim) elif len(cevap_anahtar)<=90: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)", "dolu\") if len(cevap_anahtar)<=30: basarim=cevap_islemleri(col1_gri_dolu,col1_coord,1) elif len(cevap_anahtar)<=60: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim=(basarim1[0]+basarim2[0],basarim1[1]+basarim2[1],basarim1[2]+basarim2[2],basarim1[3]+basarim2[3]) #print(basarim)", "toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) def cevap_contour_bul(isim,isim_gri): coord=[] thresholded=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) contour=cv2.findContours(thresholded,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE) x_coords=[(0,0)] sayac=0 contour=imutils.grab_contours(contour)", "sayac=0 contour=imutils.grab_contours(contour) 
contour=contours.sort_contours(contour,method=\"top-to-bottom\")[0] for c in contour: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x,", "col1,col2,col3,col4=cevap_kolon(cevap_bos) col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4) col1_coord,col2_coord,col3_coord,col4_coord=cevap_contour(col1,col2,col3,col4) #contour_cizdir(col1,col1_coord) #cevap_islemleri(col2_gri,coord_cevap) ''' EXAM TYPE FIND PART", "import contours from imutils.perspective import four_point_transform,order_points import imutils cevap_anahtar={0:2,1:1,2:2,3:3,4:1,5:4,6:4,7:3,8:1,9:1,10:0,11:0,12:2,13:1,14:2,15:3,16:4,17:4,18:4,19:3,20:2,21:1,22:0,23:0,24:0,25:4,26:2,27:3,28:4,29:4,30:4,31:3,32:2,33:1,34:0,35:0,36:1,37:2,38:3,39:4} #,", "and ar>=0.9 and ar<=1.1: box=cv2.minAreaRect(approx) box=cv2.boxPoints(box) box=np.array(box,dtype=np.int) M=cv2.moments(box) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00'])", "def bolge_bul(resim,gri): bolgeler={} thr2=cv2.adaptiveThreshold(gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) areas=[] cont=cv2.findContours(thr2,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont) temp=[] cont=contours.sort_contours(cont,\"top-to-bottom\")[0] a=0", "in enumerate(cnt): if len(cevap_anahtar)<=q_no+s: return (dogru,yanlıs,bos,iki_cevap) maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray')", "toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) # print(\"cevap\",cevap) if cevap[0]>500: yanit.append(alfabe[cevap[1]]) elif cevap[0]<600: yanit.append(\"", "cevap=None cnt=contours.sort_contours(coords[i:i+10],method=\"top-to-bottom\")[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske)", "h) = cv2.boundingRect(approx) ar = w / float(h) if cv2.contourArea(c)>30", "Türü: \",sinav_turu) ogrno_dolu=four_point_transform(warp2,bolgeler2['ogrno']) ogrno_dolu_gri=cv2.cvtColor(ogrno_dolu,cv2.COLOR_BGR2GRAY) ogrno_islemleri(ogrno_dolu,ogrno_dolu_gri,ogrno_coord) cevap_dolu=four_point_transform(warp2,bolgeler2['cevaplar']) cevap_dolu_gri=cv2.cvtColor(cevap_dolu,cv2.COLOR_BGR2GRAY) col1_dolu,col2_dolu,col3_dolu,col4_dolu=cevap_kolon(cevap_dolu) col1_gri_dolu,col2_gri_dolu,col3_gri_dolu,col4_gri_dolu=cevap_gri(col1_dolu,col2_dolu,col3_dolu,col4_dolu) #contour_cizdir(col1_dolu,col1_coord,\"colon1", "def cevap_kolon(cevap): pts1=np.array([(2,50),(300,50),(2,1545),(300,1545)]) pts2=np.array([(300,50),(600,50),(302,1545),(602,1545)]) pts3=np.array([(600,50),(900,50),(602,1545),(902,1545)]) pts4=np.array([(900,50),(1200,50),(902,1545),(1202,1545)]) col1=four_point_transform(cevap,pts1) col2=four_point_transform(cevap,pts2) col3=four_point_transform(cevap,pts3) col4=four_point_transform(cevap,pts4)", "coords.append(approx) cv2.drawContours(resim,[box],0,(0,0,255),thickness=3) if len(coords)==5: return coords else: return 0 def", "areas[0][1]+areas[1][1]>2300000: kagit=imutils.rotate(kagit,angle=180) print(\"Kağıdı ters koymuşsunuz,çevrildi\") ret=True return ret,kagit else: return", "for c in cont: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y, w, h)", "col2_gri=cv2.cvtColor(col2,cv2.COLOR_BGR2GRAY) col3_gri=cv2.cvtColor(col3,cv2.COLOR_BGR2GRAY) col4_gri=cv2.cvtColor(col4,cv2.COLOR_BGR2GRAY) return 
col1_gri,col2_gri,col3_gri,col4_gri def cevap_contour(col1,col2,col3,col4): col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4) col1_coord=cevap_contour_bul(col1,col1_gri) col2_coord=cevap_contour_bul(col2,col1_gri)", "#cv2.imshow(\"resim_olge\",imutils.resize(resim,height=650)) if len(temp)>=5: bolgeler={'isim':temp[0],'ogrno':temp[1],'sinav_turu':temp[2],'soru_grubu':temp[3],'ogretim_onay':temp[4],'cevaplar':temp[5]} areas=sorted(areas,key=lambda x:x[1],reverse=True) return bolgeler,areas def cevap_islemleri(cevap,coords,col_no=1):", "#a+=1 toplam_beyaz=cv2.countNonZero(maske) #print(toplam_beyaz,j) if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s)", "BIRAKMA DURUMU bos+=1 continue else: if cevap_anahtar[q_no+s]== cevap[1]: #print(cevap_anahtar[q_no+s],cevap[1]) dogru+=1", "sinav_bos=four_point_transform(kagit,bolgeler['sinav_turu']) sinav_bos_gri=four_point_transform(kagit_gri,bolgeler['sinav_turu']) sinav_coord,sinav_thresh=contour_bul(sinav_bos,sinav_bos_gri) sinav_islemleri(sinav_bos,sinav_bos_gri,sinav_coord) #cv2.imshow(\"sınav türü\",sinav_bos_gri) ''' OTHER PARTS THAT", "ANSWER PART INTO 4 SLICES AND FIND ONE BY ONE", "işaret:{basarim[3]}\") cv2.waitKey() cv2.destroyAllWindows() if __name__ == '__main__': bos_kagit=\"optic_empty.jpg\" dolu_kagit=\"optic_marked.jpg\" main_starter(bos_kagit,dolu_kagit)", "TYPE FIND PART ''' sinav_bos=four_point_transform(kagit,bolgeler['sinav_turu']) sinav_bos_gri=four_point_transform(kagit_gri,bolgeler['sinav_turu']) sinav_coord,sinav_thresh=contour_bul(sinav_bos,sinav_bos_gri) sinav_islemleri(sinav_bos,sinav_bos_gri,sinav_coord) #cv2.imshow(\"sınav türü\",sinav_bos_gri)", "x_coords=[(0,0)] sayac=0 contour=imutils.grab_contours(contour) contour=contours.sort_contours(contour,method=\"top-to-bottom\")[0] for c in contour: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx)", "toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) return yanit[cevap[1]] def sorugrup_islemleri(soru,soru_gri,coords): yanit=[\"A\",\"B\",\"C\",\"D\",\"E\"] sayac=0 thresh=cv2.threshold(soru_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0]", "col1,col2,col3,col4 def cevap_gri(col1,col2,col3,col4): ''' KOLONLARI GRİ YAPMAK İÇİN,MAİNDE YER KAPLAMASIN", "False and abs(x_coords[-1][1]-y)<35: coord.append(approx) x_coords.append((x,y)) sayac+=1 #cv2.drawContours(isim,[box],0,(255,0,0),thickness=3) #cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2) elif abs(x_coords[-1][1]-y)>=35:", "ar_value=200 #if karmasiklik==1: # ar_value=800 cont=cv2.findContours(thr6,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE) x_coords=[(0,0)] sayac=0 cont=imutils.grab_contours(cont) cont=contours.sort_contours(cont,method=\"top-to-bottom\")[0]", "#İKİ ŞIK İŞARETLEME DURUMU iki_cevap+=1 continue elif cevap[0]<800:# BOŞ BIRAKMA", "or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) return yanit[cevap[1]] def sorugrup_islemleri(soru,soru_gri,coords): yanit=[\"A\",\"B\",\"C\",\"D\",\"E\"] sayac=0 thresh=cv2.threshold(soru_gri,180,255,cv2.THRESH_BINARY_INV)[1]", "w / float(h) if cv2.contourArea(c)>30 and ar>=0.9 and ar<=1.1: box=cv2.minAreaRect(approx)", "coord,thr6 def contour_cizdir(resim,cont,isim=\"default\"): for c in cont: cv2.drawContours(resim,[c],0,(0,255,0),thickness=4) #print(f\"Bulunan contour", "if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) if sayac==5: break", "elif cevap[0]<800:# BOŞ BIRAKMA DURUMU bos+=1 continue else: if cevap_anahtar[q_no+s]==", "INTO 4 
SLICES AND FIND ONE BY ONE ''' cevap_bos=four_point_transform(kagit,bolgeler['cevaplar'])", "plt.imshow(maske,cmap='gray') #plt.show() sayac+=1 toplam_beyaz=cv2.countNonZero(maske) if cevap is None or toplam_beyaz>cevap[0]:", "cevap_islemleri(cevap,coords,col_no=1): iki_cevap=0 bos=0 dogru=0 q_no=0 yanlıs=0 if col_no==1: pass elif", "continue return coord def ters_bul(kagit,areas): ret=False #print(areas[0][0]) if areas[0][0]!=1 and", "def isim_islemleri(isim,coords,thresh): a=0 yanit=[] ad_str=\"\" coords=contours.sort_contours(coords,method=\"left-to-right\")[0] for (s,i) in enumerate(np.arange(0,len(coords),32)):", "enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) a+=1 toplam_beyaz=cv2.countNonZero(maske) if cevap is None", "coord def ters_bul(kagit,areas): ret=False #print(areas[0][0]) if areas[0][0]!=1 and areas[0][1]+areas[1][1]>2300000: kagit=imutils.rotate(kagit,angle=180)", "c in cont: approx=cv2.approxPolyDP(c,0.009*cv2.arcLength(c,True),True) if cv2.contourArea(approx)>10050 and len(approx)==4: a+=1 M=cv2.moments(approx)", "(s,i) in enumerate(np.arange(0,len(coords),20)): cevap=None cnt=contours.sort_contours(coords[i:i+30])[0] toplam_beyaz=None for (j,c) in enumerate(cnt):", "len(coords)==5: return coords else: return 0 def tekrar_bul(array,koordinat): for c", "warp2,warp2_gri=kagit_bul(resim,resim_gri) bolgeler2,areas2=bolge_bul(warp2,warp2_gri) ret,warp2=ters_bul(warp2,areas2) ''' TERS İSE TEKRAR BOLGELERİ BUL '''", "temp=[] cont=contours.sort_contours(cont,\"top-to-bottom\")[0] a=0 for c in cont: approx=cv2.approxPolyDP(c,0.009*cv2.arcLength(c,True),True) if cv2.contourArea(approx)>10050", "cevap_islemleri(isim,coords): a=0 thresh=cv2.threshold(isim,179,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),20)): cevap=None cnt=contours.sort_contours(coords[i:i+30])[0]", "{len(cont)}\") def bolge_bul(resim,gri): bolgeler={} thr2=cv2.adaptiveThreshold(gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) areas=[] cont=cv2.findContours(thr2,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont) temp=[] cont=contours.sort_contours(cont,\"top-to-bottom\")[0]", "(s,i) in enumerate(np.arange(0,len(coords),5)): cevap=None cnt=contours.sort_contours(coords[i:i+5])[0] toplam_beyaz=None say=0 for (j,c) in", "cevap=None cnt=contours.sort_contours(coords[i:i+32],method=\"top-to-bottom\")[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske)", "bolgeler2,areas2=bolge_bul(warp2,warp2_gri) else: pass isim_dolu=four_point_transform(warp2,bolgeler2['isim']) isim_dolu_gri=cv2.cvtColor(isim_dolu,cv2.COLOR_BGR2GRAY) contour_cizdir(isim_dolu,coord_isim,\"dolu_kagit_contourlu\") ''' OGRETİM ONAY DOLU", "cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray') #plt.show() toplam_beyaz=cv2.countNonZero(maske) #print(toplam_beyaz,j) if cevap is None", "cevap=None cnt=contours.sort_contours(coords[i:i+10],method=\"left-to-right\")[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske)", "and 
abs(x_coords[-1][1]-y)<35: coord.append(approx) x_coords.append((x,y)) sayac+=1 #cv2.drawContours(isim,[box],0,(255,0,0),thickness=3) #cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2) elif abs(x_coords[-1][1]-y)>=35: coord.append(approx)", "cevap_bos_gri=four_point_transform(kagit_gri,bolgeler['cevaplar']) col1,col2,col3,col4=cevap_kolon(cevap_bos) col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4) col1_coord,col2_coord,col3_coord,col4_coord=cevap_contour(col1,col2,col3,col4) #contour_cizdir(col1,col1_coord) #cevap_islemleri(col2_gri,coord_cevap) ''' EXAM TYPE FIND", "#cv2.drawContours(ogretim_dolu,ogret_cont,-1,(255,0,0),thickness=3) #cv2.imshow(\"ogretc\",ogretim_dolu) #ogretim_onayı=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogretimonay_coord) sorugrup_dolu=four_point_transform(warp2,bolgeler2['soru_grubu']) sorugrup_dolu_gri=cv2.cvtColor(sorugrup_dolu,cv2.COLOR_BGR2GRAY) soru_tur=sorugrup_islemleri(sorugrup_dolu,sorugrup_dolu_gri,soru_cont) print(\"Soru Grubu\",soru_tur) thresh_dolu=cv2.threshold(isim_dolu_gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1] isim_str=isim_islemleri(isim_dolu_gri,coord_isim,thresh_dolu)", "cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) a+=1 toplam_beyaz=cv2.countNonZero(maske) if cevap is None or toplam_beyaz>cevap[0]:", "pass isim_dolu=four_point_transform(warp2,bolgeler2['isim']) isim_dolu_gri=cv2.cvtColor(isim_dolu,cv2.COLOR_BGR2GRAY) contour_cizdir(isim_dolu,coord_isim,\"dolu_kagit_contourlu\") ''' OGRETİM ONAY DOLU KAGIT '''", "toplam_beyaz=cv2.countNonZero(maske) if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) def cevap_contour_bul(isim,isim_gri):", "basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3) basarim=basarim1+basarim2+basarim3 elif len(cevap_anahtar)<=120: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3) basarim4=cevap_islemleri(col4_gri_dolu,col4_coord,4) basarim=basarim1+basarim2+basarim3+basarim4 print(f\"Doğru", "None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) return yanit[cevap[1]] def sorugrup_islemleri(soru,soru_gri,coords): yanit=[\"A\",\"B\",\"C\",\"D\",\"E\"] sayac=0", "None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) if sayac==5: break print(cevap) if cevap[0]>500:", "#contour_cizdir(isim_bos,coord,\"isim_bos\") #cevap_islemleri(cevap_bos_gri,coord) ############################################## resim=cv2.imread(dolu_kagit) resim_gri=cv2.cvtColor(resim,cv2.COLOR_BGR2GRAY) warp2,warp2_gri=kagit_bul(resim,resim_gri) bolgeler2,areas2=bolge_bul(warp2,warp2_gri) ret,warp2=ters_bul(warp2,areas2) ''' TERS", "resim_gri=cv2.cvtColor(resim,cv2.COLOR_BGR2GRAY) warp2,warp2_gri=kagit_bul(resim,resim_gri) bolgeler2,areas2=bolge_bul(warp2,warp2_gri) ret,warp2=ters_bul(warp2,areas2) ''' TERS İSE TEKRAR BOLGELERİ BUL", "else: pass isim_dolu=four_point_transform(warp2,bolgeler2['isim']) isim_dolu_gri=cv2.cvtColor(isim_dolu,cv2.COLOR_BGR2GRAY) contour_cizdir(isim_dolu,coord_isim,\"dolu_kagit_contourlu\") ''' OGRETİM ONAY DOLU KAGIT", "İŞARETLEME DURUMU iki_cevap+=1 continue elif cevap[0]<800:# BOŞ BIRAKMA DURUMU bos+=1", "imutils.perspective import four_point_transform import imutils import cv2 import matplotlib.pyplot as", "if area<1500 and area>250 and ar>=0.9 and ar<=1.1: box=cv2.minAreaRect(approx) box=cv2.boxPoints(box)", "cont=imutils.grab_contours(cont) for c in cont: 
approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y, w,", "cevap_contour_bul(isim,isim_gri): coord=[] thresholded=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) contour=cv2.findContours(thresholded,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE) x_coords=[(0,0)] sayac=0 contour=imutils.grab_contours(contour) contour=contours.sort_contours(contour,method=\"top-to-bottom\")[0] for c", "sorugrup_dolu=four_point_transform(warp2,bolgeler2['soru_grubu']) sorugrup_dolu_gri=cv2.cvtColor(sorugrup_dolu,cv2.COLOR_BGR2GRAY) soru_tur=sorugrup_islemleri(sorugrup_dolu,sorugrup_dolu_gri,soru_cont) print(\"Soru Grubu\",soru_tur) thresh_dolu=cv2.threshold(isim_dolu_gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1] isim_str=isim_islemleri(isim_dolu_gri,coord_isim,thresh_dolu) print(isim_str) sinav_dolu=four_point_transform(warp2,bolgeler2['sinav_turu']) sinav_dolu_gri=cv2.cvtColor(sinav_dolu,cv2.COLOR_BGR2GRAY)", "return yanit[cevap[1]] def sorugrup_islemleri(soru,soru_gri,coords): yanit=[\"A\",\"B\",\"C\",\"D\",\"E\"] sayac=0 thresh=cv2.threshold(soru_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i)", "cevap_bos=four_point_transform(kagit,bolgeler['cevaplar']) cevap_bos_gri=four_point_transform(kagit_gri,bolgeler['cevaplar']) col1,col2,col3,col4=cevap_kolon(cevap_bos) col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4) col1_coord,col2_coord,col3_coord,col4_coord=cevap_contour(col1,col2,col3,col4) #contour_cizdir(col1,col1_coord) #cevap_islemleri(col2_gri,coord_cevap) ''' EXAM TYPE", "basarim=basarim1+basarim2+basarim3 elif len(cevap_anahtar)<=120: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3) basarim4=cevap_islemleri(col4_gri_dolu,col4_coord,4) basarim=basarim1+basarim2+basarim3+basarim4 print(f\"Doğru cevap", "w, h) = cv2.boundingRect(approx) ar = w / float(h) if", "return yanit[cevap[1]] #print(\"tespit edilemedi\") return \"Tespit edilemedi\" #################################################################### def main_starter(bos_kagit,dolu_kagit):", "in yanit: ad_str+=s return ad_str def cevap_kolon(cevap): pts1=np.array([(2,50),(300,50),(2,1545),(300,1545)]) pts2=np.array([(300,50),(600,50),(302,1545),(602,1545)]) pts3=np.array([(600,50),(900,50),(602,1545),(902,1545)])", "''' sinav_bos=four_point_transform(kagit,bolgeler['sinav_turu']) sinav_bos_gri=four_point_transform(kagit_gri,bolgeler['sinav_turu']) sinav_coord,sinav_thresh=contour_bul(sinav_bos,sinav_bos_gri) sinav_islemleri(sinav_bos,sinav_bos_gri,sinav_coord) #cv2.imshow(\"sınav türü\",sinav_bos_gri) ''' OTHER PARTS", "col1_dolu,col2_dolu,col3_dolu,col4_dolu=cevap_kolon(cevap_dolu) col1_gri_dolu,col2_gri_dolu,col3_gri_dolu,col4_gri_dolu=cevap_gri(col1_dolu,col2_dolu,col3_dolu,col4_dolu) #contour_cizdir(col1_dolu,col1_coord,\"colon1 dolu\") if len(cevap_anahtar)<=30: basarim=cevap_islemleri(col1_gri_dolu,col1_coord,1) elif len(cevap_anahtar)<=60: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1)", "sayac==5: break print(cevap) if cevap[0]>500: return yanit[cevap[1]] #print(\"tespit edilemedi\") return", "soru_cont,soru_thr=contour_bul(sorugrup_bos,sorugrup_bos_gri,1) ############################### ogretim_bos=four_point_transform(kagit,bolgeler['ogretim_onay']) ogretim_bos_gri=four_point_transform(kagit_gri,bolgeler['ogretim_onay']) 
ogret_cont,ogret_thr=contour_bul(ogretim_bos,ogretim_bos_gri,1) ''' NAME FIND PART. '''", "<filename>optical_form_reader/main.py<gh_stars>0 import cv2 import numpy as np from imutils import", "col3_coord=cevap_contour_bul(col3,col1_gri) col4_coord=cevap_contour_bul(col4,col1_gri) return col1_coord,col2_coord,col3_coord,col4_coord def ogrno_islemleri(ogrno,ogrno_gri,coords): yanit=\"\" thresh=cv2.threshold(ogrno_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"left-to-right\")[0] for", "else: continue return coord def ters_bul(kagit,areas): ret=False #print(areas[0][0]) if areas[0][0]!=1", "x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) # print(x,y) res=tekrar_bul(x_coords,x) if res is False and", "koordinat==c[0] or abs(koordinat-c[0])<15: return True #Tekrar var else: pass return", "ogrno_islemleri(ogrno_dolu,ogrno_dolu_gri,ogrno_coord) cevap_dolu=four_point_transform(warp2,bolgeler2['cevaplar']) cevap_dolu_gri=cv2.cvtColor(cevap_dolu,cv2.COLOR_BGR2GRAY) col1_dolu,col2_dolu,col3_dolu,col4_dolu=cevap_kolon(cevap_dolu) col1_gri_dolu,col2_gri_dolu,col3_gri_dolu,col4_gri_dolu=cevap_gri(col1_dolu,col2_dolu,col3_dolu,col4_dolu) #contour_cizdir(col1_dolu,col1_coord,\"colon1 dolu\") if len(cevap_anahtar)<=30: basarim=cevap_islemleri(col1_gri_dolu,col1_coord,1)", "AND FIND ONE BY ONE ''' cevap_bos=four_point_transform(kagit,bolgeler['cevaplar']) cevap_bos_gri=four_point_transform(kagit_gri,bolgeler['cevaplar']) col1,col2,col3,col4=cevap_kolon(cevap_bos) col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4)", "in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray') #plt.show() toplam_beyaz=cv2.countNonZero(maske) if cevap", "or abs(koordinat-c[0])<15: return True #Tekrar var else: pass return False", "Numarası:\",yanit) def sinav_islemleri(sinav,sinav_gri,coords): yanit=[\"QUİZ\",\"ARA\",\"FİNAL\",\"BÜTÜNLEME\"] thresh=cv2.threshold(sinav_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),10)):", "col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4) col1_coord,col2_coord,col3_coord,col4_coord=cevap_contour(col1,col2,col3,col4) #contour_cizdir(col1,col1_coord) #cevap_islemleri(col2_gri,coord_cevap) ''' EXAM TYPE FIND PART '''", "edilemedi\") return \"Tespit edilemedi\" #################################################################### def main_starter(bos_kagit,dolu_kagit): image=cv2.imread(bos_kagit) gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) kagit,kagit_gri=kagit_bul(image,gray)", "ret,kagit def kagit_bul(image,gray): thr=cv2.threshold(gray,150,255,cv2.THRESH_BINARY)[1] contour=cv2.findContours(thr,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) contour=imutils.grab_contours(contour) contour=sorted(contour,key=cv2.contourArea,reverse=True) for c in", "cv2.boundingRect(approx) ar = w / float(h) if area<1500 and area>250", "(j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) a+=1 toplam_beyaz=cv2.countNonZero(maske) if cevap", "a=0 yanit=[] ad_str=\"\" coords=contours.sort_contours(coords,method=\"left-to-right\")[0] for (s,i) in enumerate(np.arange(0,len(coords),32)): cevap=None cnt=contours.sort_contours(coords[i:i+32],method=\"top-to-bottom\")[0]", "ar<=1.1: 
box=cv2.minAreaRect(approx) box=cv2.boxPoints(box) box=np.array(box,dtype=np.int) M=cv2.moments(box) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) res=tekrar_bul(x_coords,x) if res", "in cont: cv2.drawContours(resim,[c],0,(0,255,0),thickness=4) #print(f\"Bulunan contour sayısı: {len(cont)}\") def bolge_bul(resim,gri): bolgeler={}", "coords=contours.sort_contours(coords,method=\"top-to-bottom\")[0] for (s,i) in enumerate(np.arange(0,len(coords),5)): cevap=None cnt=contours.sort_contours(coords[i:i+5])[0] toplam_beyaz=None say=0 for", "cevap_kolon(cevap): pts1=np.array([(2,50),(300,50),(2,1545),(300,1545)]) pts2=np.array([(300,50),(600,50),(302,1545),(602,1545)]) pts3=np.array([(600,50),(900,50),(602,1545),(902,1545)]) pts4=np.array([(900,50),(1200,50),(902,1545),(1202,1545)]) col1=four_point_transform(cevap,pts1) col2=four_point_transform(cevap,pts2) col3=four_point_transform(cevap,pts3) col4=four_point_transform(cevap,pts4) return", "and ar<=1.1: box=cv2.minAreaRect(approx) box=cv2.boxPoints(box) box=np.array(box,dtype=np.int) if cv2.contourArea(box)>150: coords.append(approx) cv2.drawContours(resim,[box],0,(0,0,255),thickness=3) if", "cont=contours.sort_contours(cont,method=\"top-to-bottom\")[0] for c in cont: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y, w,", "cevap[0]>500: yanit.append(alfabe[cevap[1]]) elif cevap[0]<600: yanit.append(\" \") for s in yanit:", "maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray') #plt.show() sayac+=1 toplam_beyaz=cv2.countNonZero(maske) if cevap is", "float(h) if cv2.contourArea(c)>30 and ar>=0.9 and ar<=1.1: box=cv2.minAreaRect(approx) box=cv2.boxPoints(box) box=np.array(box,dtype=np.int)", "say>1: #İKİ ŞIK İŞARETLEME DURUMU iki_cevap+=1 continue elif cevap[0]<800:# BOŞ", "yanit=\"\" thresh=cv2.threshold(ogrno_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method=\"left-to-right\")[0] for (s,i) in enumerate(np.arange(0,len(coords),10)): cevap=None cnt=contours.sort_contours(coords[i:i+10],method=\"top-to-bottom\")[0] toplam_beyaz=None", "contour: approx=cv2.approxPolyDP(c,0.02*cv2.arcLength(c,True),True) if len(approx)==4: #cv2.drawContours(image,[approx],0,(0,255,0),thickness=3) break warp=four_point_transform(image,approx.reshape(4,2)) warp_gri=four_point_transform(gray,approx.reshape(4,2)) return warp,warp_gri", "return coords else: return 0 def tekrar_bul(array,koordinat): for c in", "ONAY DOLU KAGIT ''' ogretim_dolu=four_point_transform(warp2,bolgeler2['ogretim_onay']) ogretim_dolu_gri=cv2.cvtColor(ogretim_dolu,cv2.COLOR_BGR2GRAY) ogret_onay=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogret_cont) print(\"Öğretim Onayı:\",ogret_onay) #cv2.drawContours(ogretim_dolu,ogret_cont,-1,(255,0,0),thickness=3)" ]
# Copyright (c) 2013, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

import xbmc
import xbmcaddon
import xbmcgui
import filmtipset

FILMTIPSET_ACCESS_KEY = "7ndg3Q3qwW8dPzbJMrB5Rw"

class XBMCPlayer(xbmc.Player):
    def __init__(self, *args):
        self.imdb = None
        self.time = None
        self.time_total = None

    def onPlayBackStarted(self):
        self.update()

    def onPlayBackEnded(self):
        self.onDone()

    def onPlayBackStopped(self):
        self.onDone()

    def update(self):
        info = self.getVideoInfoTag()
        self.imdb = info.getIMDBNumber()
        self.time = self.getTime()
        self.time_total = self.getTotalTime()

    def onDone(self):
        print "getTime", self.time
        print "getTotalTime", self.time_total
        print "imdb", self.imdb
        addon = xbmcaddon.Addon(id='script.filmtipset-grade')
        key = addon.getSetting("key")
        user = addon.getSetting("user")
        grader = filmtipset.Filmtipset(FILMTIPSET_ACCESS_KEY, key, user)
        movie = grader.get_movie_imdb(self.imdb)
        print movie
        if movie["grade"]["type"] != "seen":
            dialog = xbmcgui.Dialog()
            grade = dialog.select("Grade " + movie["orgname"] + " on filmtipset:",
                                  ["Skip", "1", "2", "3", "4", "5"])
            if grade != 0:
                print dialog, grade
                print grader.grade(movie["id"], grade)

player = XBMCPlayer()
while(not xbmc.abortRequested):
    if player.isPlayingVideo():
        player.update()
    xbmc.sleep(1000)
# turkeydonkey/nzmath3
import unittest
from nzmath.matrix import *
import nzmath.vector as vector
import nzmath.rational as rational
import nzmath.poly.uniutil as uniutil

Ra = rational.Rational
Poly = uniutil.polynomial
Int = rational.theIntegerRing

# sub test
try:
    from test.testMatrixFiniteField import *
except:
    try:
        from nzmath.test.testMatrixFiniteField import *
    except:
        from .testMatrixFiniteField import *

## for RingMatrix
a1 = createMatrix(1, 2, [3, 2])
a2 = Matrix(1, 2, [5, -6])
a3 = createMatrix(3, 2, [7, 8]+[3, -2]+[0, 10])
a4 = Matrix(3, 2, [21, -12]+[1, -1]+[0, 0])
a5 = createMatrix(1, 2, [Poly({0:3, 1:5}, Int), Poly({1:2}, Int)])

## for RingSquareMatrix
b1 = createMatrix(2, 2, [1, 2]+[3, 4])
b2 = Matrix(2, 2, [0, -1]+[1, -2])
b3 = createMatrix(3, 3, [0, 1, 2]+[5, 4, 6]+[7, 9, 8])
b4 = Matrix(3, 3, [1, 2, 3]+[0, 5, -2]+[7, 1, 9])
b5 = createMatrix(3, 3, [1, 3, 2, 4, 6, 5, 6, 8, 9])
b6 = createMatrix(3, 3, [1, 2, 4, 0, 3, 5, 0, 0, 0])
b7 = createMatrix(3, 3, [1, 0, 0, 9, 1, 0, 5, 6, 1])
b8 = Matrix(3, 3, [3, 15, 12]+[2, 7, 5]+[1, -4, -2])

## for FieldMatrix
c1 = createMatrix(1, 2, [Ra(3), Ra(2)])
c2 = createMatrix(4, 5,
                  [Ra(0), 0, 1, 2, -1]+[0, 0, 5, 12, -2]+
                  [0, 0, 1, 3, -1]+[0, 0, 1, 2, 0])
c3 = createMatrix(3, 2, [Ra(1), 2]+[2, 5]+[6, 7])

## for FieldSquareMatrix
d1 = createMatrix(2, 2, [Ra(1), Ra(2)]+[Ra(3), Ra(4)])
d2 = createMatrix(3, 3, [Ra(1), 2, 3]+[4, 5, 6]+[5, 7, 9])
d3 = Matrix(3, 3, [Ra(1), Ra(2), Ra(3)]+[Ra(0), Ra(5), Ra(-2)]+[7, 1, 9])
d4 = createMatrix(6, 6,
                  [Ra(4), 2, 5, 0, 2, 1]+[5, 1, 2, 5, 1, 1]+
                  [90, 7, 54, 8, 4, 6]+[7, 5, 0, 8, 2, 5]+
                  [8, 2, 6, 5, -4, 2]+[4, 1, 5, 6, 3, 1])
d5 = createMatrix(4, 4,
                  [Ra(2), -1, 0, 0]+[-1, 2, -1, 0]+[0, -1, 2, -1]+[0, 0, -1, 2])
d6 = createMatrix(4, 4,
                  [Ra(1), 2, 3, 4]+[2, 3, 4, 5]+[3, 4, 5, 6]+[4, 5, 6, 7])
d7 = Matrix(3, 3,
            [Ra(1, 2), Ra(2, 3), Ra(1, 5)]+
            [Ra(3, 2), Ra(1, 3), Ra(2, 5)]+
            [Ra(-1, 2), Ra(4, 3), Ra(3, 5)])

## other objects
v1 = vector.Vector([1, 4])
v2 = vector.Vector([8])
v3 = vector.Vector([0, 0, 1])


class MatrixTest(unittest.TestCase):
    def testInit(self):
        lst_lst = Matrix(3, 2, [[21, -12], [1, -1], [0, 0]])
        self.assertEqual(a4, lst_lst)
        lst_tuple = Matrix(3, 2, [(21, 1, 0), (-12, -1, 0)])
        self.assertEqual(a4, lst_tuple)
        lst_vect = Matrix(3, 2, [vector.Vector([21, 1, 0]), vector.Vector([-12, -1, 0])])
        self.assertEqual(a4, lst_vect)

    def testGetitem(self):
        self.assertEqual(2, a1[1, 2])
        self.assertEqual(-2, b2[2, 2])
        self.assertRaises(IndexError, a1.__getitem__, "wrong")
        self.assertEqual(vector.Vector([21, 1, 0]), a4[1])

    def testEqual(self):
        self.assertTrue(a1 == Matrix(1, 2, [3, 2]))
        self.assertTrue(isinstance(a1 == a1, bool))

    def testNonZero(self):
        self.assertTrue(not zeroMatrix(2, 3))

    def testContains(self):
        self.assertTrue(5 in a2)

    def testCall(self):
        call = createMatrix(1, 2, [13, 4])
        self.assertEqual(call, a5(2))

    def testMap(self):
        pow_two = createMatrix(1, 2, [9, 4])
        self.assertEqual(pow_two, a1.map(lambda n: n ** 2))

    def testReduce(self):
        self.assertEqual(-2, a3.reduce(min))

    def testGetRow(self):
        row1 = vector.Vector([3, -2])
        self.assertEqual(row1, a3.getRow(2))
        row2 = vector.Vector([1, 2])
        self.assertEqual(row2, b1.getRow(1))

    def testGetColumn(self):
        col1 = vector.Vector([-12, -1, 0])
        self.assertEqual(col1, a4.getColumn(2))
        col2 = vector.Vector([1, 3])
        self.assertEqual(col2, b1.getColumn(1))

    def testTranspose(self):
        trans = createMatrix(2, 3, [7, 3, 0]+[8, -2, 10])
        self.assertEqual(trans, a3.transpose())

    def testGetBlock(self):
        block = Matrix(2, 3, [4, 6, 5, 6, 8, 9])
        self.assertEqual(block, b5.getBlock(2, 1, 2, 3))

    def testSubMatrix(self):
        sub1 = createMatrix(2, 1, [-12, 0])
        self.assertEqual(sub1, a4.subMatrix(2, 1))
        sub2 = createMatrix(2, 2, [4, 5, 6, 9])
        self.assertEqual(sub2, b5.subMatrix([2, 3], [1, 3]))


class SquareMatrixTest(unittest.TestCase):
    def testIsUpperTriangularMatrix(self):
        UT = createMatrix(4, 4,
                          [1, 2, 3, 4]+[0, 5, 6, 7]+[0, 0, 8, 9]+[0, 0, 0, 1])
        notUT = createMatrix(4, 4,
                             [1, 2, 3, 4]+[0, 5, 6, 7]+[0, 0, 8, 9]+[0, 0, 1, 1])
        assert UT.isUpperTriangularMatrix()
        assert not notUT.isUpperTriangularMatrix()

    def testIsLowerTriangularMatrix(self):
        LT = createMatrix(4, 4,
                          [1, 0, 0, 0]+[2, 3, 0, 0]+[4, 5, 6, 0]+[7, 8, 9, 10])
        notLT = createMatrix(4, 4,
                             [1, 0, 0, 0]+[2, 3, 1, 0]+[4, 5, 6, 0]+[7, 8, 9, 10])
        assert LT.isLowerTriangularMatrix()
        assert not notLT.isLowerTriangularMatrix()

    def testIsDiagonalMatrix(self):
        diag = createMatrix(2, 2, [-3, 0, 0, 5])
        assert diag.isDiagonalMatrix()

    def testIsScalarMatrix(self):
        scaler = createMatrix(2, 2, [10, 0, 0, 10])
        assert scaler.isScalarMatrix()

    def testIsSymmetricMatrix(self):
        symmetric = createMatrix(2, 2, [2, 3, 3, 5])
        assert symmetric.isSymmetricMatrix()


class RingMatrixTest(unittest.TestCase):
    def testAdd(self):
        sum1 = createMatrix(1, 2, [8, -4])
        self.assertEqual(sum1, a1 + a2)
        sum2 = createMatrix(2, 2, [1, 1, 4, 2])
        self.assertEqual(sum2, b1 + b2)

    def testSub(self):
        sub1 = createMatrix(1, 2, [-2, 8])
        self.assertEqual(sub1, a1 - a2)
        sub2 = createMatrix(2, 2, [1, 3, 2, 6])
        self.assertEqual(sub2, b1 - b2)

    def testMul(self):
        mul1 = createMatrix(1, 2, [2, -7])
        self.assertEqual(mul1, a1 * b2)
        mul2 = createMatrix(3, 2, [-15, -6]+[-2, -2]+[0, 0])
        self.assertEqual(mul2, a4 * b1)
        mul3 = createMatrix(3, 2, [1, -1]+[109, -64]+[156, -93])
        self.assertEqual(mul3, b3 * a4)

    def testScalarMul(self):
        mul = createMatrix(1, 2, [15, 10])
        self.assertEqual(mul, 5 * a1)

    def testVectorMul(self):
        mul = vector.Vector([9, 19])
        self.assertEqual(mul, b1 * v1)

    def testMod(self):
        mod1 = createMatrix(3, 2, [1, 2]+[0, 1]+[0, 1])
        self.assertEqual(mod1, a3 % 3)

    def testNeg(self):
        neg = createMatrix(2, 2, [0, 1, -1, 2])
        self.assertEqual(neg, -b2)

    def testHermiteNormalForm(self):
        already = createMatrix(4, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
        h = already.hermiteNormalForm()
        self.assertEqual(h, already)
        lessrank = createMatrix(2, 3, [1, 0, 0, 0, 1, 0])
        h = lessrank.hermiteNormalForm()
        self.assertEqual(h.row, lessrank.row)
        self.assertEqual(h.column, lessrank.column)
        zerovec = vector.Vector([0, 0])
        self.assertEqual(zerovec, h.getColumn(1))
        square = createMatrix(3, 3, [1, 0, 0, 0, 1, 1, 0, 1, 1])
        h = square.hermiteNormalForm()
        self.assertEqual(h.row, square.row)
        self.assertEqual(h.column, square.column)
        hermite = createMatrix(3, 3, [0, 1, 0, 0, 0, 1, 0, 0, 1])
        self.assertEqual(hermite, h)

    def testExtHermiteNormalForm(self):
        already = createMatrix(4, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
        U_1, h_1 = already.exthermiteNormalForm()
        self.assertEqual(h_1, already)
        self.assertEqual(already * U_1, h_1)
        lessrank = createMatrix(2, 3, [1, 0, 0, 0, 1, 0])
        U_2, h_2 = lessrank.exthermiteNormalForm()
        self.assertEqual(h_2.row, lessrank.row)
        self.assertEqual(h_2.column, lessrank.column)
        self.assertEqual(lessrank * U_2, h_2)

    def testKernelAsModule(self):
        ker_1 = a1.kernelAsModule()
        self.assertEqual(a1 * ker_1[1], vector.Vector([0]))
        # zero test
        ker_2 = b1.kernelAsModule()
        self.assertEqual(ker_2, None)


class RingSquareMatrixTest(unittest.TestCase):
    def testPow(self):
        pow1 = createMatrix(2, 2, [7, 10, 15, 22])
        self.assertEqual(pow1, b1 ** 2)
        pow2 = createMatrix(2, 2, [1, 0, 0, 1])
        self.assertEqual(pow2, b2 ** 0)

    def testIsOrthogonalMatrix(self):
        orthogonal = createMatrix(2, 2, [Ra(3, 5), Ra(4, 5), Ra(-4, 5), Ra(3, 5)])
        assert orthogonal.isOrthogonalMatrix()

    def testIsAlternatingMatrix(self):
        alternate1 = createMatrix(2, 2, [0, 2, -2, 0])
        assert alternate1.isAlternatingMatrix()
        alternate2 = createMatrix(2, [1, 2, -2, 0])
        assert not alternate2.isAntisymmetricMatrix()

    def testIsSingular(self):
        assert b6.isSingular()

    def testTrace(self):
        self.assertEqual(15, b4.trace())

    def testDeterminant(self):
        self.assertEqual(-2, b1.determinant())
        # sf.bug #1914349
        self.assertTrue(isinstance(b3.determinant(), int))
        self.assertEqual(36, b3.determinant())

    def testCofactor(self):
        self.assertEqual(-6, b5.cofactor(1, 2))

    def testCommutator(self):
        commutator = createMatrix(2, 2, [5, -1, 9, -5])
        self.assertEqual(commutator, b1.commutator(b2))

    def testCharacteristicMatrix(self):
        charMat = createMatrix(2, 2,
                               [Poly({0:-1, 1:1}, Int), Poly({0:-2}, Int)]+
                               [Poly({0:-3}, Int), Poly({0:-4, 1:1}, Int)])
        self.assertEqual(charMat, b1.characteristicMatrix())

    def testCharacteristicPolynomial(self):
        assert d1.characteristicPolynomial() == d1.characteristicMatrix().determinant()

    def testAdjugateMatrix(self):
        adjugate = createMatrix(3, 3, [47, -15, -19, -14, -12, 2, -35, 13, 5])
        self.assertEqual(adjugate, b4.adjugateMatrix())
        assert d1 * d1.adjugateMatrix() == d1.determinant() * unitMatrix(d1.row)

    def testCofactorMatrix(self):
        cofact = d5.cofactorMatrix()
        self.assertEqual(d5.cofactor(2, 3), cofact[2, 3])

    def testSmithNormalForm(self):
        self.assertEqual([12, 1, 1], b5.smithNormalForm())
        self.assertRaises(ValueError, b6.smithNormalForm)
        self.assertEqual([1, 1, 1], b7.smithNormalForm())
        self.assertEqual([9, 3, 1], b8.smithNormalForm())

    def testExtSmithNormalForm(self):
        smith1 = Matrix(3, 3, [12, 0, 0, 0, 1, 0, 0, 0, 1])
        U_1, V_1, M_1 = b5.extsmithNormalForm()
        self.assertEqual(smith1, M_1)
        self.assertEqual(M_1, U_1 * b5 * V_1)
        smith2 = Matrix(3, 3, [9, 0, 0, 0, 3, 0, 0, 0, 1])
        U_2, V_2, M_2 = b8.extsmithNormalForm()
        self.assertEqual(smith2, M_2)
        self.assertEqual(M_2, U_2 * b8 * V_2)


class FieldMatrixTest(unittest.TestCase):
    def testDiv(self):
        div = createMatrix(1, 2, [1, Ra(2, 3)])
        self.assertEqual(div, c1 / 3)

    def testKernel(self):
        ker = c2.kernel()
        self.assertTrue(not c2 * ker)

    def testImage(self):
        img = createMatrix(4, 3, [1, 2, -1]+[5, 12, -2]+[1, 3, -1]+[1, 2, 0])
        self.assertEqual(img, c2.image())

    def testRank(self):
        self.assertEqual(3, c2.rank())
        self.assertEqual(3, d3.rank())

    def testInverseImage(self):
        self.assertEqual(d6, d5 * d5.inverseImage(d6))
        self.assertRaises(NoInverseImage, d2.inverseImage, unitMatrix(3))

    def testSolve(self):
        for i in range(1, d6.column+1):
            self.assertEqual(d6[i], d5 * d5.solve(d6[i])[0])
        sol1 = c1.solve(v2)
        for i in range(len(sol1[1])):
            self.assertEqual(v2, c1 * (sol1[0]+sol1[1][i]))
        self.assertRaises(NoInverseImage, c3.solve, v3)

    def testColumnEchelonForm(self):
        echelon = createMatrix(4, 5,
                               [Ra(0), 0, 1, 0, 0]+[0, 0, 0, 2, 3]+
                               [0, 0, 0, 1, 0]+[0, 0, 0, 0, 1])
        self.assertEqual(echelon, c2.columnEchelonForm())


class FieldSquareMatrixTest(unittest.TestCase):
    def testPow(self):
        pow3 = createMatrix(2, 2, [Ra(11, 2), Ra(-5, 2), Ra(-15, 4), Ra(7, 4)])
        self.assertEqual(pow3, d1 ** (-2))

    def testTriangulate(self):
        triangle = createMatrix(3, 3,
                                [Ra(1, 1), 2, 3]+[0, 5, -2]+[0, 0, Ra(-86, 5)])
        self.assertEqual(triangle, d3.triangulate())

    def testDeterminant(self):
        self.assertEqual(Ra(-7, 15), d7.determinant())

    def testInverse(self):
        cinverse = createMatrix(3, 3)
        cinverse.set([Ra(-47, 86), Ra(15, 86), Ra(19, 86)]+
                     [Ra(7, 43), Ra(6, 43), Ra(-1, 43)]+
                     [Ra(35, 86), Ra(-13, 86), Ra(-5, 86)])
        self.assertEqual(cinverse, d3.inverse())
        self.assertRaises(NoInverse, d2.inverse)
        self.assertEqual(d3.inverse() * c3, d3.inverse(c3))

    def testInverseNoChange(self):
        # sf bug#1849220
        M1 = SquareMatrix(2, 2, [Ra(1, 2), Ra(1, 2), Ra(1, 1), Ra(-3, 2)])
        M1.inverse()
        M2 = SquareMatrix(2, 2, [Ra(1, 2), Ra(1, 2), Ra(1, 1), Ra(-3, 2)])
        self.assertEqual(M2, M1)

    def testHessenbergForm(self):
        pass

    def testLUDecomposition(self):
        L, U = d4.LUDecomposition()
        assert L * U == d4
        assert L.isLowerTriangularMatrix()
        assert U.isUpperTriangularMatrix()


class MatrixRingTest(unittest.TestCase):
    def setUp(self):
        self.m2z = MatrixRing.getInstance(2, Int)

    def testZero(self):
        z = self.m2z.zero
        self.assertEqual(0, z[1, 1])
        self.assertEqual(0, z[1, 2])
        self.assertEqual(0, z[2, 1])
        self.assertEqual(0, z[2, 2])

    def testOne(self):
        o = self.m2z.one
        self.assertEqual(1, o[1, 1])
        self.assertEqual(0, o[1, 2])
        self.assertEqual(0, o[2, 1])
        self.assertEqual(1, o[2, 2])

    def testUnitMatrix(self):
        """
        unitMatrix() is an alias of one.
        """
        self.assertEqual(self.m2z.one, self.m2z.unitMatrix())

    def testRingAPI(self):
        m3z = MatrixRing.getInstance(3, Int)
        m2q = MatrixRing.getInstance(2, rational.theRationalField)
        # issubring
        self.assertFalse(self.m2z.issubring(Int))
        self.assertTrue(self.m2z.issubring(self.m2z))
        self.assertTrue(self.m2z.issubring(m2q))
        self.assertFalse(self.m2z.issubring(m3z))
        # issuperring
        self.assertFalse(self.m2z.issuperring(Int))
        self.assertTrue(self.m2z.issuperring(self.m2z))
        self.assertFalse(self.m2z.issuperring(m2q))
        self.assertFalse(self.m2z.issuperring(m3z))
        # getCommonSuperring
        self.assertRaises(TypeError, self.m2z.getCommonSuperring, Int)


class SubspaceTest(unittest.TestCase):
    def testSupplementBasis(self):
        ba = Subspace(3, 2, [1, 2, 3, 4, 5, 7])
        supbase = createMatrix(3, 3, [1, 2, 0, 3, 4, 0, 5, 7, 1])
        self.assertEqual(supbase, ba.supplementBasis())

    def testSumOfSubspaces(self):
        unit1 = Subspace(3, 1, [1, 0, 0])
        unit2 = Subspace(3, 2, [0, 0]+[1, 0]+[0, 1])
        self.assertEqual(unitMatrix(3), unit1.sumOfSubspaces(unit2))

    def testIntersectionOfSubspace(self):
        unit1 = Subspace(3, 2, [1, 0]+[0, 1]+[0, 0])
        unit2 = unitMatrix(3)
        unit2.toSubspace()
        intersect = Subspace(3, 2, [-1, 0]+[0, -1]+[0, 0])
        self.assertEqual(intersect, unit1.intersectionOfSubspaces(unit2))


class FunctionTest(unittest.TestCase):
    def testCreateMatrix(self):
        Q = rational.theRationalField
        mat1 = createMatrix(2, 3, [[2, 3, 4], [5, 6, 7]])
        self.assertEqual(mat1.coeff_ring, Int)
        mat2 = createMatrix(2, 3, [[2, 3, 4], [5, 6, 7]], Q)
        self.assertEqual(mat2.coeff_ring, Q)
        mat3 = createMatrix(3, [(1, 2, 3), (4, 5, 6), (7, 8, 9)], Q)
        self.assertTrue(mat3.row == mat3.column)
        self.assertTrue(mat3.__class__, FieldSquareMatrix)
        mat4 = createMatrix(2, [vector.Vector([1, 4]), vector.Vector([6, 8])])
        self.assertEqual(mat4.coeff_ring, Int)
        mat5 = createMatrix(5, 6, Int)
        self.assertTrue(mat5 == 0)
        mat6 = createMatrix(1, 4)
        self.assertTrue(mat6 == 0)
        mat7 = createMatrix(3, Q)
        self.assertTrue(mat7.row == mat7.column)
        self.assertTrue(mat7 == 0)
        self.assertEqual(mat7.coeff_ring, Q)
        mat8 = createMatrix(7)
        self.assertTrue(mat8 == 0)


def suite(suffix="Test"):
    suite = unittest.TestSuite()
    all_names = globals()
    for name in all_names:
        if name.endswith(suffix):
            suite.addTest(unittest.makeSuite(all_names[name], "test"))
    return suite

if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    runner.run(suite())
M1 = SquareMatrix(2,", "import unittest from nzmath.matrix import * import nzmath.vector as vector", "lessrank.exthermiteNormalForm() self.assertEqual(h_2.row, lessrank.row) self.assertEqual(h_2.column, lessrank.column) self.assertEqual(lessrank * U_2, h_2) def", "already = createMatrix(4, 3, [1, 0, 0, 0, 1, 0,", "2, -1]+[0, 0, 5, 12, -2]+[0, 0, 1, 3, -1]+[0,", "col2 = vector.Vector([1, 3]) self.assertEqual(col2, b1.getColumn(1)) def testTranspose(self): trans =", "7]) d7 = Matrix(3, 3, \\ [Ra(1, 2), Ra(2, 3),", "charMat = createMatrix(2, 2, \\ [Poly({0:-1,1:1}, Int), Poly({0:-2}, Int)]+[Poly({0:-3}, Int),", "Ra(19, 86)]+\\ [Ra(7, 43), Ra(6, 43), Ra(-1, 43)]+[Ra(35, 86), Ra(-13,", "self.assertRaises(NoInverse, d2.inverse) self.assertEqual(d3.inverse() * c3, d3.inverse(c3)) def testInverseNoChange(self): # sf", "15, 12]+[2,7,5]+[1,-4,-2]) ## for FieldMatrix c1 = createMatrix(1, 2, [Ra(3),", "testKernelAsModule(self): ker_1 = a1.kernelAsModule() self.assertEqual(a1 * ker_1[1], vector.Vector([0])) #zero test", "import nzmath.poly.uniutil as uniutil Ra = rational.Rational Poly = uniutil.polynomial", "0]+[4, 5, 6, 0]+[7, 8, 9, 10]) assert LT.isLowerTriangularMatrix() assert", "[Ra(1), Ra(2), Ra(3)]+[Ra(0), Ra(5), Ra(-2)]+[7, 1, 9]) d4 = createMatrix(6,", "= createMatrix(1, 4) self.assertTrue(mat6 == 0) mat7 = createMatrix(3, Q)", "= createMatrix(4, 4, \\ [1, 2, 3, 4]+[0, 5, 6,", "9]) d4 = createMatrix(6, 6, \\ [Ra(4), 2, 5, 0,", "def testIsScalarMatrix(self): scaler = createMatrix(2, 2, [10, 0, 0, 10])", "testInverse(self): cinverse = createMatrix(3, 3) cinverse.set([Ra(-47, 86), Ra(15, 86), Ra(19,", "ba = Subspace(3, 2, [1, 2, 3, 4, 5, 7])", "def testImage(self): img = createMatrix(4,3,[1,2,-1]+[5,12,-2]+[1,3,-1]+[1,2,0]) self.assertEqual(img, c2.image()) def testRank(self): self.assertEqual(3,", "9, 10]) notLT = createMatrix(4, 4, \\ [1, 0, 0,", "assert d1.characteristicPolynomial() == d1.characteristicMatrix().determinant() def testAdjugateMatrix(self): adjugate = createMatrix(3, 3,", "testZero(self): z = self.m2z.zero self.assertEqual(0, z[1, 1]) self.assertEqual(0, z[1, 2])", "cinverse = createMatrix(3, 3) cinverse.set([Ra(-47, 86), Ra(15, 86), Ra(19, 86)]+\\", "[-1, 0]+[0, -1]+[0, 0]) self.assertEqual(intersect, unit1.intersectionOfSubspaces(unit2)) class FunctionTest(unittest.TestCase): def testCreateMatrix(self):", "4, \\ [Ra(1), 2, 3, 4]+[2, 3, 4, 5]+[3, 4,", "ker) def testImage(self): img = createMatrix(4,3,[1,2,-1]+[5,12,-2]+[1,3,-1]+[1,2,0]) self.assertEqual(img, c2.image()) def testRank(self):", "testIsLowerTriangularMatrix(self): LT = createMatrix(4, 4, \\ [1, 0, 0, 0]+[2,", "0, 1, 0, 0, 1]) U_1, h_1 = already.exthermiteNormalForm() self.assertEqual(h_1,", "13, 5]) self.assertEqual(adjugate, b4.adjugateMatrix()) assert d1 * d1.adjugateMatrix() == d1.determinant()", "5)]) assert orthogonal.isOrthogonalMatrix() def testIsAlternatingMatrix(self): alternate1 = createMatrix(2, 2, [0,", "for RingSquareMatrix b1 = createMatrix(2, 2, [1, 2]+[3, 4]) b2", "1, 2]+[5, 4, 6]+[7, 9, 8]) b4 = Matrix(3, 3,", "#1914349 self.assertTrue(isinstance(b3.determinant(), int)) self.assertEqual(36, b3.determinant()) def testCofactor(self): self.assertEqual(-6, b5.cofactor(1, 2))", "0, 0, 1]) U_1, V_1, M_1 = b5.extsmithNormalForm() self.assertEqual(smith1, M_1)", "3, [4, 6, 5, 6, 8, 9]) self.assertEqual(block, b5.getBlock(2, 1,", "-7]) self.assertEqual(mul1, a1 * b2) mul2 = createMatrix(3, 2, [-15,", ": n ** 2)) def testReduce(self): self.assertEqual(-2, a3.reduce(min)) def testGetRow(self):", "5, 6, 7]+[0, 
0, 8, 9]+[0, 0, 1, 1]) assert", "5)]+[Ra(3, 2), Ra(1, 3), Ra(2, 5)]+[Ra(-1, 2), Ra(4, 3), Ra(3,", "86), Ra(-13, 86), Ra(-5, 86)]) self.assertEqual(cinverse, d3.inverse()) self.assertRaises(NoInverse, d2.inverse) self.assertEqual(d3.inverse()", "0, 0, 1, 0]) U_2, h_2 = lessrank.exthermiteNormalForm() self.assertEqual(h_2.row, lessrank.row)", "vector.Vector([6, 8])]) self.assertEqual(mat4.coeff_ring, Int) mat5 = createMatrix(5, 6, Int) self.assertTrue(mat5", "2]) self.assertRaises(IndexError, a1.__getitem__, \"wrong\") self.assertEqual(vector.Vector([21, 1, 0]), a4[1]) def testEqual(self):", "0]+[8, -2, 10]) self.assertEqual(trans, a3.transpose()) def testGetBlock(self): block = Matrix(2,", "d1 = createMatrix(2, 2, [Ra(1), Ra(2)]+[Ra(3), Ra(4)]) d2 = createMatrix(3,", "[Ra(0), 0, 1, 2, -1]+[0, 0, 5, 12, -2]+[0, 0,", "self.assertEqual(row2, b1.getRow(1)) def testGetColumn(self): col1 = vector.Vector([-12, -1, 0]) self.assertEqual(col1,", "1]) self.assertEqual(hermite, h) def testExtHermiteNormalForm(self): already = createMatrix(4, 3, [1,", "self.assertEqual(block, b5.getBlock(2, 1, 2, 3)) def testSubMatrix(self): sub1 = createMatrix(2,", "* d5.solve(d6[i])[0]) sol1 = c1.solve(v2) for i in range(len(sol1[1])): self.assertEqual(v2,", "class FieldMatrixTest(unittest.TestCase): def testDiv(self): div = createMatrix(1, 2, [1, Ra(2,", "-2]+[0, 10]) a4 = Matrix(3, 2, [21, -12]+[1, -1]+[0, 0])", "# issuperring self.assertFalse(self.m2z.issuperring(Int)) self.assertTrue(self.m2z.issuperring(self.m2z)) self.assertFalse(self.m2z.issuperring(m2q)) self.assertFalse(self.m2z.issuperring(m3z)) # getCommonSuperring self.assertRaises(TypeError, self.m2z.getCommonSuperring,", "Q) mat3 = createMatrix(3, [(1, 2, 3), (4, 5, 6),", "unittest from nzmath.matrix import * import nzmath.vector as vector import", "issubring self.assertFalse(self.m2z.issubring(Int)) self.assertTrue(self.m2z.issubring(self.m2z)) self.assertTrue(self.m2z.issubring(m2q)) self.assertFalse(self.m2z.issubring(m3z)) # issuperring self.assertFalse(self.m2z.issuperring(Int)) self.assertTrue(self.m2z.issuperring(self.m2z)) self.assertFalse(self.m2z.issuperring(m2q))", "self.assertEqual(h.column, square.column) hermite = createMatrix(3, 3, [0, 1, 0, 0", "createMatrix(3, 3, [47, -15, -19, -14, -12, 2, -35, 13,", "L * U == d4 assert L.isLowerTriangularMatrix() assert U.isUpperTriangularMatrix() class", "def testGetBlock(self): block = Matrix(2, 3, [4, 6, 5, 6,", "self.assertEqual(trans, a3.transpose()) def testGetBlock(self): block = Matrix(2, 3, [4, 6,", "Ra(4)]) d2 = createMatrix(3, 3, [Ra(1), 2, 3]+[4, 5, 6]+[5,", "== d1.characteristicMatrix().determinant() def testAdjugateMatrix(self): adjugate = createMatrix(3, 3, [47, -15,", "def testDeterminant(self): self.assertEqual(-2, b1.determinant()) #sf.bug #1914349 self.assertTrue(isinstance(b3.determinant(), int)) self.assertEqual(36, b3.determinant())", "createMatrix(4, 5,\\ [Ra(0), 0, 1, 0, 0]+[0, 0, 0, 2,", "0, 1]) notUT = createMatrix(4, 4, \\ [1, 2, 3,", "% 3) def testNeg(self): neg = createMatrix(2, 2, [0, 1,", "ker = c2.kernel() self.assertTrue(not c2 * ker) def testImage(self): img", "Poly({0:-2}, Int)]+[Poly({0:-3}, Int), Poly({0:-4,1:1}, Int)]) self.assertEqual(charMat, b1.characteristicMatrix()) def testCharacteristicPolynomial(self): assert", "self.assertEqual(1, o[1, 1]) self.assertEqual(0, o[1, 2]) self.assertEqual(0, o[2, 1]) self.assertEqual(1,", "test.testMatrixFiniteField import * except: try: from nzmath.test.testMatrixFiniteField import * except:", "from test.testMatrixFiniteField 
import * except: try: from nzmath.test.testMatrixFiniteField import *", "6, 1]) b8 = Matrix(3, 3, [3, 15, 12]+[2,7,5]+[1,-4,-2]) ##", "= createMatrix(3, 3, \\ [Ra(1, 1), 2, 3]+[0, 5, -2]+[0,", "-12]+[1, -1]+[0, 0]) a5 = createMatrix(1, 2, [Poly({0:3, 1:5}, Int),", "c1 * (sol1[0]+sol1[1][i])) self.assertRaises(NoInverseImage, c3.solve, v3) def testColumnEchelonForm(self): echelon =", "= createMatrix(3, [(1, 2, 3), (4, 5, 6), (7, 8,", "= createMatrix(3, 3, [47, -15, -19, -14, -12, 2, -35,", "0, 0, 1]) self.assertEqual(pow2, b2 ** 0) def testIsOrthogonalMatrix(self): orthogonal", "1, 0]+[4, 5, 6, 0]+[7, 8, 9, 10]) assert LT.isLowerTriangularMatrix()", "[5,6,7]], Q) self.assertEqual(mat2.coeff_ring, Q) mat3 = createMatrix(3, [(1, 2, 3),", "unitMatrix(d1.row) def testCofactorMatrix(self): cofact = d5.cofactorMatrix() self.assertEqual(d5.cofactor(2, 3), cofact[2, 3])", "2, [vector.Vector([21, 1, 0]), vector.Vector([-12, -1, 0])]) self.assertEqual(a4, lst_vect) def", "mat1 = createMatrix(2, 3, [[2,3,4], [5,6,7]]) self.assertEqual(mat1.coeff_ring, Int) mat2 =", "int)) self.assertEqual(36, b3.determinant()) def testCofactor(self): self.assertEqual(-6, b5.cofactor(1, 2)) def testCommutator(self):", "2, 3]+[0, 5, -2]+[7, 1, 9]) b5 = createMatrix(3, 3,", "3, [1, 0, 0, 0, 1, 0, 0, 0, 1,", "b6.smithNormalForm) self.assertEqual([1, 1, 1], b7.smithNormalForm()) self.assertEqual([9, 3, 1], b8.smithNormalForm()) def", "self.assertTrue(isinstance(a1 == a1, bool)) def testNonZero(self): self.assertTrue(not zeroMatrix(2, 3)) def", "0, 0, 0, 1, 0]) h = lessrank.hermiteNormalForm() self.assertEqual(h.row, lessrank.row)", "10]) self.assertEqual(mul, 5 * a1) def testVectorMul(self): mul = vector.Vector([9,", "[-2, 8]) self.assertEqual(sub1, a1 - a2) sub2 = createMatrix(2, 2,", "testRank(self): self.assertEqual(3, c2.rank()) self.assertEqual(3, d3.rank()) def testInverseImage(self): self.assertEqual(d6, d5 *", "d1 ** (-2)) def testTriangulate(self): triangle = createMatrix(3, 3, \\", "0, 1, 0]+[0, 0, 0, 0, 1]) self.assertEqual(echelon, c2.columnEchelonForm()) class", "Ra(2)]+[Ra(3), Ra(4)]) d2 = createMatrix(3, 3, [Ra(1), 2, 3]+[4, 5,", "1]) self.assertEqual(0, z[2, 2]) def testOne(self): o = self.m2z.one self.assertEqual(1,", "[2, 3, 3, 5]) assert symmetric.isSymmetricMatrix() class RingMatrixTest(unittest.TestCase): def testAdd(self):", "Matrix(3, 2, [(21, 1, 0), (-12, -1, 0)]) self.assertEqual(a4, lst_tuple)", "= createMatrix(5, 6, Int) self.assertTrue(mat5 == 0) mat6 = createMatrix(1,", "-1]+[0, 0]) a5 = createMatrix(1, 2, [Poly({0:3, 1:5}, Int), Poly({1:2},", "= createMatrix(2, 2, [1, 3, 2, 6]) self.assertEqual(sub2, b1 -", "RingMatrixTest(unittest.TestCase): def testAdd(self): sum1 = createMatrix(1, 2, [8, -4]) self.assertEqual(sum1,", "testSmithNormalForm(self): self.assertEqual([12, 1, 1], b5.smithNormalForm()) self.assertRaises(ValueError, b6.smithNormalForm) self.assertEqual([1, 1, 1],", "1, 9]) b5 = createMatrix(3, 3, [1, 3, 2, 4,", "b1.getColumn(1)) def testTranspose(self): trans = createMatrix(2, 3, [7, 3, 0]+[8,", "testPow(self): pow1 = createMatrix(2, 2, [7, 10, 15, 22]) self.assertEqual(pow1,", "mul = createMatrix(1, 2, [15, 10]) self.assertEqual(mul, 5 * a1)", "1]) d5 = createMatrix(4, 4, \\ [Ra(2), -1, 0, 0]+[-1,", "createMatrix(3, 3, [1, 0, 0, 0, 1, 1, 0, 1,", "alternate1 = createMatrix(2, 2, [0, 2, -2, 0]) assert alternate1.isAlternatingMatrix()", "b1 - b2) def testMul(self): mul1 = createMatrix(1, 2, [2,", "1, 2, 5, 1, 1]+[90, 7, 54, 8, 4, 6]+\\", "self.assertTrue(a1 == Matrix(1, 2, [3, 2])) 
self.assertTrue(isinstance(a1 == a1, bool))", "= Subspace(3, 2, [-1, 0]+[0, -1]+[0, 0]) self.assertEqual(intersect, unit1.intersectionOfSubspaces(unit2)) class", "0]) U_2, h_2 = lessrank.exthermiteNormalForm() self.assertEqual(h_2.row, lessrank.row) self.assertEqual(h_2.column, lessrank.column) self.assertEqual(lessrank", "0]) a5 = createMatrix(1, 2, [Poly({0:3, 1:5}, Int), Poly({1:2}, Int)])", "1, 1], b7.smithNormalForm()) self.assertEqual([9, 3, 1], b8.smithNormalForm()) def testExtSmithNormalForm(self): smith1", "a1.__getitem__, \"wrong\") self.assertEqual(vector.Vector([21, 1, 0]), a4[1]) def testEqual(self): self.assertTrue(a1 ==", "5, 0, 8, 2, 5]+[8, 2, 6, 5, -4, 2]+[4,", "testSubMatrix(self): sub1 = createMatrix(2, 1, [-12, 0]) self.assertEqual(sub1, a4.subMatrix(2, 1))", "9, 1, 0, 5, 6, 1]) b8 = Matrix(3, 3,", "8, 9]+[0, 0, 1, 1]) assert UT.isUpperTriangularMatrix() assert not notUT.isUpperTriangularMatrix()", "[47, -15, -19, -14, -12, 2, -35, 13, 5]) self.assertEqual(adjugate,", "1, 0), (-12, -1, 0)]) self.assertEqual(a4, lst_tuple) lst_vect = Matrix(3,", "U_1, h_1 = already.exthermiteNormalForm() self.assertEqual(h_1, already) self.assertEqual(already * U_1, h_1)", "self.assertTrue(mat5 == 0) mat6 = createMatrix(1, 4) self.assertTrue(mat6 == 0)", "[-15, -6]+[-2, -2]+[0, 0]) self.assertEqual(mul2, a4 * b1) mul3 =", "4, \\ [Ra(2), -1, 0, 0]+[-1, 2, -1, 0]+[0, -1,", "2, [15, 10]) self.assertEqual(mul, 5 * a1) def testVectorMul(self): mul", "= vector.Vector([0, 0, 1]) class MatrixTest(unittest.TestCase): def testInit(self): lst_lst =", "* (sol1[0]+sol1[1][i])) self.assertRaises(NoInverseImage, c3.solve, v3) def testColumnEchelonForm(self): echelon = createMatrix(4,", "mat6 = createMatrix(1, 4) self.assertTrue(mat6 == 0) mat7 = createMatrix(3,", "* a4) def testScalarMul(self): mul = createMatrix(1, 2, [15, 10])", "= already.hermiteNormalForm() self.assertEqual(h, already) lessrank = createMatrix(2, 3, [1, 0,", "def testIsOrthogonalMatrix(self): orthogonal = createMatrix(2, 2, [Ra(3, 5), Ra(4, 5),", "testLUDecomposition(self): L, U = d4.LUDecomposition() assert L * U ==", "uniutil.polynomial Int = rational.theIntegerRing # sub test try: from test.testMatrixFiniteField", "- b2) def testMul(self): mul1 = createMatrix(1, 2, [2, -7])", "for FieldMatrix c1 = createMatrix(1, 2, [Ra(3), Ra(2)]) c2 =", "rational import nzmath.poly.uniutil as uniutil Ra = rational.Rational Poly =", "* v1) def testMod(self): mod1 = createMatrix(3, 2, [1, 2]+[0,", "-14, -12, 2, -35, 13, 5]) self.assertEqual(adjugate, b4.adjugateMatrix()) assert d1", "= createMatrix(2, 2, [-3, 0, 0, 5]) assert diag.isDiagonalMatrix() def", "M1 = SquareMatrix(2, 2, [Ra(1, 2), Ra(1, 2), Ra(1, 1),", "o = self.m2z.one self.assertEqual(1, o[1, 1]) self.assertEqual(0, o[1, 2]) self.assertEqual(0,", "RingSquareMatrixTest(unittest.TestCase): def testPow(self): pow1 = createMatrix(2, 2, [7, 10, 15,", "= createMatrix(2, 2, [5, -1, 9, -5]) self.assertEqual(commutator, b1.commutator(b2)) def", "0]) h = lessrank.hermiteNormalForm() self.assertEqual(h.row, lessrank.row) self.assertEqual(h.column, lessrank.column) zerovec =", "2), Ra(1, 1), Ra(-3, 2)]) self.assertEqual(M2, M1) def testHessenbergForm(self): pass", "Ra(1, 1), Ra(-3, 2)]) self.assertEqual(M2, M1) def testHessenbergForm(self): pass def", "hermite = createMatrix(3, 3, [0, 1, 0, 0 ,0, 1,", "= Matrix(3, 3, \\ [Ra(1), Ra(2), Ra(3)]+[Ra(0), Ra(5), Ra(-2)]+[7, 1,", "[0, 0]+[1, 0]+[0, 1]) self.assertEqual(unitMatrix(3), unit1.sumOfSubspaces(unit2)) def testIntersectionOfSubspace(self): 
unit1 =", "def testKernelAsModule(self): ker_1 = a1.kernelAsModule() self.assertEqual(a1 * ker_1[1], vector.Vector([0])) #zero", "2, [1, 0, 0, 1]) self.assertEqual(pow2, b2 ** 0) def", "\\ [Poly({0:-1,1:1}, Int), Poly({0:-2}, Int)]+[Poly({0:-3}, Int), Poly({0:-4,1:1}, Int)]) self.assertEqual(charMat, b1.characteristicMatrix())", "## other objects v1 = vector.Vector([1, 4]) v2 = vector.Vector([8])", "0, 0]) unit2 = Subspace(3, 2, [0, 0]+[1, 0]+[0, 1])", "neg = createMatrix(2, 2, [0, 1, -1, 2]) self.assertEqual(neg, -b2)", "1, [-12, 0]) self.assertEqual(sub1, a4.subMatrix(2, 1)) sub2 = createMatrix(2, 2,", "2, -1]+[0, 0, -1, 2]) d6 = createMatrix(4, 4, \\", "self.assertEqual(neg, -b2) def testHermiteNormalForm(self): already = createMatrix(4, 3, [1, 0,", "self.assertRaises(NoInverseImage, d2.inverseImage, unitMatrix(3)) def testSolve(self): for i in range(1, d6.column+1):", "(4, 5, 6), (7, 8, 9)], Q) self.assertTrue(mat3.row == mat3.column)", "a3.transpose()) def testGetBlock(self): block = Matrix(2, 3, [4, 6, 5,", "self.assertEqual(1, o[2, 2]) def testUnitMatrix(self): \"\"\" unitMatrix() is an alias", "8, 9]+[0, 0, 0, 1]) notUT = createMatrix(4, 4, \\", "= createMatrix(3, 3, [1, 2, 4, 0, 3, 5, 0,", "a2) sum2 = createMatrix(2, 2, [1, 1, 4, 2]) self.assertEqual(sum2,", "b1 * v1) def testMod(self): mod1 = createMatrix(3, 2, [1,", "-35, 13, 5]) self.assertEqual(adjugate, b4.adjugateMatrix()) assert d1 * d1.adjugateMatrix() ==", "= Matrix(3, 3, [12, 0, 0, 0, 1, 0, 0,", "def testCreateMatrix(self): Q = rational.theRationalField mat1 = createMatrix(2, 3, [[2,3,4],", "createMatrix(2, 2, [4, 5, 6, 9]) self.assertEqual(sub2, b5.subMatrix([2, 3], [1,", "Poly({0:-4,1:1}, Int)]) self.assertEqual(charMat, b1.characteristicMatrix()) def testCharacteristicPolynomial(self): assert d1.characteristicPolynomial() == d1.characteristicMatrix().determinant()", "sum2 = createMatrix(2, 2, [1, 1, 4, 2]) self.assertEqual(sum2, b1", "5]) self.assertEqual(adjugate, b4.adjugateMatrix()) assert d1 * d1.adjugateMatrix() == d1.determinant() *", "5, 6, 8, 9]) b6 = createMatrix(3, 3, [1, 2,", "6]) self.assertEqual(sub2, b1 - b2) def testMul(self): mul1 = createMatrix(1,", "testVectorMul(self): mul = vector.Vector([9, 19]) self.assertEqual(mul, b1 * v1) def", "M_2 = b8.extsmithNormalForm() self.assertEqual(smith2, M_2) self.assertEqual(M_2, U_2 * b8 *", "1, 2, -1]+[0, 0, 5, 12, -2]+[0, 0, 1, 3,", "1, 2, 0]) c3 = createMatrix(3, 2, [Ra(1), 2]+[2, 5]+[6,", "ker_1[1], vector.Vector([0])) #zero test ker_2 = b1.kernelAsModule() self.assertEqual(ker_2, None) class", "1, 0]+[0, 0, 0, 0, 1]) self.assertEqual(echelon, c2.columnEchelonForm()) class FieldSquareMatrixTest(unittest.TestCase):", "/ 3) def testKernel(self): ker = c2.kernel() self.assertTrue(not c2 *", "3, 4, 5, 7]) supbase = createMatrix(3, 3, [1, 2,", "b8 * V_2) class FieldMatrixTest(unittest.TestCase): def testDiv(self): div = createMatrix(1,", "= createMatrix(2, 3, [[2,3,4], [5,6,7]]) self.assertEqual(mat1.coeff_ring, Int) mat2 = createMatrix(2,", "[Ra(11, 2), Ra(-5, 2), Ra(-15, 4), Ra(7, 4)]) self.assertEqual(pow3, d1", "FieldMatrix c1 = createMatrix(1, 2, [Ra(3), Ra(2)]) c2 = createMatrix(4,", "3)]) self.assertEqual(div, c1 / 3) def testKernel(self): ker = c2.kernel()", "= createMatrix(4, 4, \\ [Ra(1), 2, 3, 4]+[2, 3, 4,", "self.assertRaises(NoInverseImage, c3.solve, v3) def testColumnEchelonForm(self): echelon = createMatrix(4, 5,\\ [Ra(0),", "3, [1, 0, 0, 0, 1, 0]) h = lessrank.hermiteNormalForm()", "= createMatrix(1, 2, [15, 10]) self.assertEqual(mul, 5 * a1) def", 
"self.assertEqual(triangle, d3.triangulate()) def testDeterminant(self): self.assertEqual(Ra(-7, 15), d7.determinant()) def testInverse(self): cinverse", "self.assertTrue(not zeroMatrix(2, 3)) def testContains(self): self.assertTrue(5 in a2) def testCall(self):", "testIsScalarMatrix(self): scaler = createMatrix(2, 2, [10, 0, 0, 10]) assert", "testIntersectionOfSubspace(self): unit1 = Subspace(3, 2, [1, 0]+[0, 1]+[0, 0]) unit2", "c1 = createMatrix(1, 2, [Ra(3), Ra(2)]) c2 = createMatrix(4, 5,", "lst_lst) lst_tuple = Matrix(3, 2, [(21, 1, 0), (-12, -1,", "[Ra(1, 1), 2, 3]+[0, 5, -2]+[0, 0, Ra(-86, 5)]) self.assertEqual(triangle,", "lessrank.column) self.assertEqual(lessrank * U_2, h_2) def testKernelAsModule(self): ker_1 = a1.kernelAsModule()", "3, 3, 5]) assert symmetric.isSymmetricMatrix() class RingMatrixTest(unittest.TestCase): def testAdd(self): sum1", "self.assertEqual(smith2, M_2) self.assertEqual(M_2, U_2 * b8 * V_2) class FieldMatrixTest(unittest.TestCase):", "0, 0, 9, 1, 0, 5, 6, 1]) b8 =", "def testIsLowerTriangularMatrix(self): LT = createMatrix(4, 4, \\ [1, 0, 0,", "already.exthermiteNormalForm() self.assertEqual(h_1, already) self.assertEqual(already * U_1, h_1) lessrank = createMatrix(2,", "[21, -12]+[1, -1]+[0, 0]) a5 = createMatrix(1, 2, [Poly({0:3, 1:5},", "0, 0, 0, 1, 1, 0, 1, 1]) h =", "= self.m2z.one self.assertEqual(1, o[1, 1]) self.assertEqual(0, o[1, 2]) self.assertEqual(0, o[2,", "9]) b6 = createMatrix(3, 3, [1, 2, 4, 0, 3,", "4]+[0, 5, 6, 7]+[0, 0, 8, 9]+[0, 0, 0, 1])", "6, 8, 9]) self.assertEqual(block, b5.getBlock(2, 1, 2, 3)) def testSubMatrix(self):", "0]+[0, 1]) self.assertEqual(unitMatrix(3), unit1.sumOfSubspaces(unit2)) def testIntersectionOfSubspace(self): unit1 = Subspace(3, 2,", "3, [1, 0, 0, 9, 1, 0, 5, 6, 1])", "def testEqual(self): self.assertTrue(a1 == Matrix(1, 2, [3, 2])) self.assertTrue(isinstance(a1 ==", "== 0) mat6 = createMatrix(1, 4) self.assertTrue(mat6 == 0) mat7", "* ker) def testImage(self): img = createMatrix(4,3,[1,2,-1]+[5,12,-2]+[1,3,-1]+[1,2,0]) self.assertEqual(img, c2.image()) def", "testMod(self): mod1 = createMatrix(3, 2, [1, 2]+[0, 1]+[0, 1]) self.assertEqual(mod1,", "as vector import nzmath.rational as rational import nzmath.poly.uniutil as uniutil", "2, [(21, 1, 0), (-12, -1, 0)]) self.assertEqual(a4, lst_tuple) lst_vect", "assert UT.isUpperTriangularMatrix() assert not notUT.isUpperTriangularMatrix() def testIsLowerTriangularMatrix(self): LT = createMatrix(4,", "Int = rational.theIntegerRing # sub test try: from test.testMatrixFiniteField import", "-1]+[0, 0]) self.assertEqual(intersect, unit1.intersectionOfSubspaces(unit2)) class FunctionTest(unittest.TestCase): def testCreateMatrix(self): Q =", "d7.determinant()) def testInverse(self): cinverse = createMatrix(3, 3) cinverse.set([Ra(-47, 86), Ra(15,", "MatrixTest(unittest.TestCase): def testInit(self): lst_lst = Matrix(3, 2, [[21, -12], [1,", "0]+[0, 0, 0, 2, 3]+[0, 0, 0, 1, 0]+[0, 0,", "block = Matrix(2, 3, [4, 6, 5, 6, 8, 9])", "SubspaceTest(unittest.TestCase): def testSupplementBasis(self): ba = Subspace(3, 2, [1, 2, 3,", "b8 = Matrix(3, 3, [3, 15, 12]+[2,7,5]+[1,-4,-2]) ## for FieldMatrix", "0]+[2, 3, 0, 0]+[4, 5, 6, 0]+[7, 8, 9, 10])", "Ra(-3, 2)]) self.assertEqual(M2, M1) def testHessenbergForm(self): pass def testLUDecomposition(self): L,", "3, [1, 2, 4, 0, 3, 5, 0, 0, 0])", "a3.getRow(2)) row2 = vector.Vector([1, 2]) self.assertEqual(row2, b1.getRow(1)) def testGetColumn(self): col1", "0, 3, 0, 0, 0, 1]) U_2, V_2, M_2 =", "2, [21, -12]+[1, -1]+[0, 0]) a5 = 
createMatrix(1, 2, [Poly({0:3,", "0, 3, 5, 0, 0, 0]) b7 = createMatrix(3, 3,", "* b1) mul3 = createMatrix(3, 2, [1, -1]+[109, -64]+[156, -93])", "0, 0, 0, 1, 0]) U_2, h_2 = lessrank.exthermiteNormalForm() self.assertEqual(h_2.row,", "self.assertFalse(self.m2z.issuperring(Int)) self.assertTrue(self.m2z.issuperring(self.m2z)) self.assertFalse(self.m2z.issuperring(m2q)) self.assertFalse(self.m2z.issuperring(m3z)) # getCommonSuperring self.assertRaises(TypeError, self.m2z.getCommonSuperring, Int) class", "scaler = createMatrix(2, 2, [10, 0, 0, 10]) assert scaler.isScalarMatrix()", "supbase = createMatrix(3, 3, [1, 2, 0, 3, 4, 0,", "## for FieldSquareMatrix d1 = createMatrix(2, 2, [Ra(1), Ra(2)]+[Ra(3), Ra(4)])", "6, 7]+[0, 0, 8, 9]+[0, 0, 0, 1]) notUT =", "5]+[6, 7]) ## for FieldSquareMatrix d1 = createMatrix(2, 2, [Ra(1),", "5, 6, 7]) d7 = Matrix(3, 3, \\ [Ra(1, 2),", "def testInverseImage(self): self.assertEqual(d6, d5 * d5.inverseImage(d6)) self.assertRaises(NoInverseImage, d2.inverseImage, unitMatrix(3)) def", "3, [7, 3, 0]+[8, -2, 10]) self.assertEqual(trans, a3.transpose()) def testGetBlock(self):", "3]) def testSmithNormalForm(self): self.assertEqual([12, 1, 1], b5.smithNormalForm()) self.assertRaises(ValueError, b6.smithNormalForm) self.assertEqual([1,", "MatrixRing.getInstance(2, rational.theRationalField) # issubring self.assertFalse(self.m2z.issubring(Int)) self.assertTrue(self.m2z.issubring(self.m2z)) self.assertTrue(self.m2z.issubring(m2q)) self.assertFalse(self.m2z.issubring(m3z)) # issuperring", "2, 3, 4, 5, 7]) supbase = createMatrix(3, 3, [1,", "self.assertEqual(echelon, c2.columnEchelonForm()) class FieldSquareMatrixTest(unittest.TestCase): def testPow(self): pow3 = createMatrix(2, 2,", "assert d1 * d1.adjugateMatrix() == d1.determinant() * unitMatrix(d1.row) def testCofactorMatrix(self):", "0]+[0, -1]+[0, 0]) self.assertEqual(intersect, unit1.intersectionOfSubspaces(unit2)) class FunctionTest(unittest.TestCase): def testCreateMatrix(self): Q", "0, 0]+[2, 3, 0, 0]+[4, 5, 6, 0]+[7, 8, 9,", "b4 = Matrix(3, 3, [1, 2, 3]+[0, 5, -2]+[7, 1,", "b1.kernelAsModule() self.assertEqual(ker_2, None) class RingSquareMatrixTest(unittest.TestCase): def testPow(self): pow1 = createMatrix(2,", "[9, 4]) self.assertEqual(pow_two, a1.map(lambda n : n ** 2)) def", "0, 1, 0, 0]+[0, 0, 0, 2, 3]+[0, 0, 0,", "bool)) def testNonZero(self): self.assertTrue(not zeroMatrix(2, 3)) def testContains(self): self.assertTrue(5 in", "vector.Vector([0])) #zero test ker_2 = b1.kernelAsModule() self.assertEqual(ker_2, None) class RingSquareMatrixTest(unittest.TestCase):", "testIsAlternatingMatrix(self): alternate1 = createMatrix(2, 2, [0, 2, -2, 0]) assert", "2, 5, 0, 2, 1]+[5, 1, 2, 5, 1, 1]+[90,", "def testSmithNormalForm(self): self.assertEqual([12, 1, 1], b5.smithNormalForm()) self.assertRaises(ValueError, b6.smithNormalForm) self.assertEqual([1, 1,", "6, \\ [Ra(4), 2, 5, 0, 2, 1]+[5, 1, 2,", "* d1.adjugateMatrix() == d1.determinant() * unitMatrix(d1.row) def testCofactorMatrix(self): cofact =", "0, 0, 0, 1]) U_1, V_1, M_1 = b5.extsmithNormalForm() self.assertEqual(smith1,", "testExtSmithNormalForm(self): smith1 = Matrix(3, 3, [12, 0, 0, 0, 1,", "0, 1]) h = already.hermiteNormalForm() self.assertEqual(h, already) lessrank = createMatrix(2,", "self.assertEqual(a1 * ker_1[1], vector.Vector([0])) #zero test ker_2 = b1.kernelAsModule() self.assertEqual(ker_2,", "1, 2, 3)) def testSubMatrix(self): sub1 = createMatrix(2, 1, [-12,", "d5.cofactorMatrix() self.assertEqual(d5.cofactor(2, 3), cofact[2, 3]) def 
testSmithNormalForm(self): self.assertEqual([12, 1, 1],", "1]) self.assertEqual(supbase, ba.supplementBasis()) def testSumOfSubspaces(self): unit1 = Subspace(3, 1, [1,", "ba.supplementBasis()) def testSumOfSubspaces(self): unit1 = Subspace(3, 1, [1, 0, 0])", "createMatrix(2, 2, [2, 3, 3, 5]) assert symmetric.isSymmetricMatrix() class RingMatrixTest(unittest.TestCase):", "as uniutil Ra = rational.Rational Poly = uniutil.polynomial Int =", "2, [1, 3, 2, 6]) self.assertEqual(sub2, b1 - b2) def", "= createMatrix(2, 3, [[2,3,4], [5,6,7]], Q) self.assertEqual(mat2.coeff_ring, Q) mat3 =", "Ra(4, 5), Ra(-4, 5), Ra(3, 5)]) assert orthogonal.isOrthogonalMatrix() def testIsAlternatingMatrix(self):", "9]) self.assertEqual(sub2, b5.subMatrix([2, 3], [1, 3])) class SquareMatrixTest(unittest.TestCase): def testIsUpperTriangularMatrix(self):", "testIsOrthogonalMatrix(self): orthogonal = createMatrix(2, 2, [Ra(3, 5), Ra(4, 5), Ra(-4,", "2]+[3, 4]) b2 = Matrix(2, 2, [0, -1]+[1, -2]) b3", "1]) U_2, V_2, M_2 = b8.extsmithNormalForm() self.assertEqual(smith2, M_2) self.assertEqual(M_2, U_2", "createMatrix(2, 2, \\ [Poly({0:-1,1:1}, Int), Poly({0:-2}, Int)]+[Poly({0:-3}, Int), Poly({0:-4,1:1}, Int)])", "5, 7, 1]) self.assertEqual(supbase, ba.supplementBasis()) def testSumOfSubspaces(self): unit1 = Subspace(3,", "b1 + b2) def testSub(self): sub1 = createMatrix(1, 2, [-2,", "n ** 2)) def testReduce(self): self.assertEqual(-2, a3.reduce(min)) def testGetRow(self): row1", "def testCharacteristicPolynomial(self): assert d1.characteristicPolynomial() == d1.characteristicMatrix().determinant() def testAdjugateMatrix(self): adjugate =", "[5,6,7]]) self.assertEqual(mat1.coeff_ring, Int) mat2 = createMatrix(2, 3, [[2,3,4], [5,6,7]], Q)", "== Matrix(1, 2, [3, 2])) self.assertTrue(isinstance(a1 == a1, bool)) def", "2, [1, 2]+[3, 4]) b2 = Matrix(2, 2, [0, -1]+[1,", "def testGetRow(self): row1 = vector.Vector([3, -2]) self.assertEqual(row1, a3.getRow(2)) row2 =", "self.assertEqual(lessrank * U_2, h_2) def testKernelAsModule(self): ker_1 = a1.kernelAsModule() self.assertEqual(a1", "b3 = createMatrix(3, 3, [0, 1, 2]+[5, 4, 6]+[7, 9,", "0) mat6 = createMatrix(1, 4) self.assertTrue(mat6 == 0) mat7 =", "createMatrix(1, 2, [Ra(3), Ra(2)]) c2 = createMatrix(4, 5, \\ [Ra(0),", "unit1.intersectionOfSubspaces(unit2)) class FunctionTest(unittest.TestCase): def testCreateMatrix(self): Q = rational.theRationalField mat1 =", "zeroMatrix(2, 3)) def testContains(self): self.assertTrue(5 in a2) def testCall(self): call", "testSolve(self): for i in range(1, d6.column+1): self.assertEqual(d6[i], d5 * d5.solve(d6[i])[0])", "0, 0, 1]) notUT = createMatrix(4, 4, \\ [1, 2,", "0, 1, 3, -1]+[0, 0, 1, 2, 0]) c3 =", "-5]) self.assertEqual(commutator, b1.commutator(b2)) def testCharacteristicMatrix(self): charMat = createMatrix(2, 2, \\", "def testRingAPI(self): m3z = MatrixRing.getInstance(3, Int) m2q = MatrixRing.getInstance(2, rational.theRationalField)", "unit2 = unitMatrix(3) unit2.toSubspace() intersect = Subspace(3, 2, [-1, 0]+[0,", "6, 5, 6, 8, 9]) b6 = createMatrix(3, 3, [1,", "createMatrix(3, 3, [Ra(1), 2, 3]+[4, 5, 6]+[5, 7, 9]) d3", "= Matrix(3, 2, [[21, -12], [1, -1], [0, 0]]) self.assertEqual(a4,", "createMatrix(2, 3, [[2,3,4], [5,6,7]], Q) self.assertEqual(mat2.coeff_ring, Q) mat3 = createMatrix(3,", "2, [Ra(11, 2), Ra(-5, 2), Ra(-15, 4), Ra(7, 4)]) self.assertEqual(pow3,", "-1, 9, -5]) self.assertEqual(commutator, b1.commutator(b2)) def testCharacteristicMatrix(self): charMat = createMatrix(2,", "self.assertEqual(v2, c1 * 
(sol1[0]+sol1[1][i])) self.assertRaises(NoInverseImage, c3.solve, v3) def testColumnEchelonForm(self): echelon", "createMatrix(2, 2, [1, 3, 2, 6]) self.assertEqual(sub2, b1 - b2)", "* U_2, h_2) def testKernelAsModule(self): ker_1 = a1.kernelAsModule() self.assertEqual(a1 *", "triangle = createMatrix(3, 3, \\ [Ra(1, 1), 2, 3]+[0, 5,", "0, 0, 0, 1]) self.assertEqual(echelon, c2.columnEchelonForm()) class FieldSquareMatrixTest(unittest.TestCase): def testPow(self):", "43), Ra(6, 43), Ra(-1, 43)]+[Ra(35, 86), Ra(-13, 86), Ra(-5, 86)])", "a1.map(lambda n : n ** 2)) def testReduce(self): self.assertEqual(-2, a3.reduce(min))", "[9, 0, 0, 0, 3, 0, 0, 0, 1]) U_2,", "Ra(-3, 2)]) M1.inverse() M2 = SquareMatrix(2, 2, [Ra(1, 2), Ra(1,", "== 0) def suite(suffix=\"Test\"): suite = unittest.TestSuite() all_names = globals()", "import * import nzmath.vector as vector import nzmath.rational as rational", "0]), vector.Vector([-12, -1, 0])]) self.assertEqual(a4, lst_vect) def testGetitem(self): self.assertEqual(2, a1[1,", "col1 = vector.Vector([-12, -1, 0]) self.assertEqual(col1, a4.getColumn(2)) col2 = vector.Vector([1,", "= createMatrix(3, 3, [Ra(1), 2, 3]+[4, 5, 6]+[5, 7, 9])", "7]+[0, 0, 8, 9]+[0, 0, 0, 1]) notUT = createMatrix(4,", "a4) def testScalarMul(self): mul = createMatrix(1, 2, [15, 10]) self.assertEqual(mul,", "= b8.extsmithNormalForm() self.assertEqual(smith2, M_2) self.assertEqual(M_2, U_2 * b8 * V_2)", "0]) unit2 = Subspace(3, 2, [0, 0]+[1, 0]+[0, 1]) self.assertEqual(unitMatrix(3),", "a2) def testCall(self): call = createMatrix(1, 2, [13, 4]) self.assertEqual(call,", "d3 = Matrix(3, 3, \\ [Ra(1), Ra(2), Ra(3)]+[Ra(0), Ra(5), Ra(-2)]+[7,", "Matrix(1, 2, [3, 2])) self.assertTrue(isinstance(a1 == a1, bool)) def testNonZero(self):", "[2, -7]) self.assertEqual(mul1, a1 * b2) mul2 = createMatrix(3, 2,", "2, [Ra(1, 2), Ra(1, 2), Ra(1, 1), Ra(-3, 2)]) self.assertEqual(M2,", "RingMatrix a1 = createMatrix(1, 2, [3, 2]) a2 = Matrix(1,", "2), Ra(-5, 2), Ra(-15, 4), Ra(7, 4)]) self.assertEqual(pow3, d1 **", "createMatrix(3, 3, [1, 3, 2, 4, 6, 5, 6, 8,", "= createMatrix(3, 3, [1, 0, 0, 0, 1, 1, 0,", "b2 ** 0) def testIsOrthogonalMatrix(self): orthogonal = createMatrix(2, 2, [Ra(3,", "15), d7.determinant()) def testInverse(self): cinverse = createMatrix(3, 3) cinverse.set([Ra(-47, 86),", "= createMatrix(1, 2, [13, 4]) self.assertEqual(call, a5(2)) def testMap(self): pow_two", "0, 5]) assert diag.isDiagonalMatrix() def testIsScalarMatrix(self): scaler = createMatrix(2, 2,", "createMatrix(2, 2, [0, 1, -1, 2]) self.assertEqual(neg, -b2) def testHermiteNormalForm(self):", "L.isLowerTriangularMatrix() assert U.isUpperTriangularMatrix() class MatrixRingTest (unittest.TestCase): def setUp(self): self.m2z =", "self.assertEqual(smith1, M_1) self.assertEqual(M_1, U_1 * b5 * V_1) smith2 =", "already.hermiteNormalForm() self.assertEqual(h, already) lessrank = createMatrix(2, 3, [1, 0, 0,", "all_names = globals() for name in all_names: if name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name],", "4, 5]+[3, 4, 5, 6]+[4, 5, 6, 7]) d7 =", "self.assertEqual(h_2.row, lessrank.row) self.assertEqual(h_2.column, lessrank.column) self.assertEqual(lessrank * U_2, h_2) def testKernelAsModule(self):", "2)) def testCommutator(self): commutator = createMatrix(2, 2, [5, -1, 9,", "b8.extsmithNormalForm() self.assertEqual(smith2, M_2) self.assertEqual(M_2, U_2 * b8 * V_2) class", "d1.characteristicPolynomial() == d1.characteristicMatrix().determinant() def testAdjugateMatrix(self): adjugate = createMatrix(3, 3, [47,", 
"-2]+[7, 1, 9]) b5 = createMatrix(3, 3, [1, 3, 2,", "self.assertEqual(h.row, lessrank.row) self.assertEqual(h.column, lessrank.column) zerovec = vector.Vector([0, 0]) self.assertEqual(zerovec, h.getColumn(1))", "= unittest.TestSuite() all_names = globals() for name in all_names: if", "[3, 2]) a2 = Matrix(1, 2, [5, -6]) a3 =", "testNonZero(self): self.assertTrue(not zeroMatrix(2, 3)) def testContains(self): self.assertTrue(5 in a2) def", "assert symmetric.isSymmetricMatrix() class RingMatrixTest(unittest.TestCase): def testAdd(self): sum1 = createMatrix(1, 2,", "self.assertEqual(cinverse, d3.inverse()) self.assertRaises(NoInverse, d2.inverse) self.assertEqual(d3.inverse() * c3, d3.inverse(c3)) def testInverseNoChange(self):", "b1) mul3 = createMatrix(3, 2, [1, -1]+[109, -64]+[156, -93]) self.assertEqual(mul3,", "self.assertRaises(IndexError, a1.__getitem__, \"wrong\") self.assertEqual(vector.Vector([21, 1, 0]), a4[1]) def testEqual(self): self.assertTrue(a1", "* except: try: from nzmath.test.testMatrixFiniteField import * except: from .testMatrixFiniteField", "def testCall(self): call = createMatrix(1, 2, [13, 4]) self.assertEqual(call, a5(2))", "3, 5]) assert symmetric.isSymmetricMatrix() class RingMatrixTest(unittest.TestCase): def testAdd(self): sum1 =", "mul3 = createMatrix(3, 2, [1, -1]+[109, -64]+[156, -93]) self.assertEqual(mul3, b3", "2, [Ra(3, 5), Ra(4, 5), Ra(-4, 5), Ra(3, 5)]) assert", "self.assertEqual(supbase, ba.supplementBasis()) def testSumOfSubspaces(self): unit1 = Subspace(3, 1, [1, 0,", "self.assertEqual(adjugate, b4.adjugateMatrix()) assert d1 * d1.adjugateMatrix() == d1.determinant() * unitMatrix(d1.row)", "mat3 = createMatrix(3, [(1, 2, 3), (4, 5, 6), (7,", "d1.characteristicMatrix().determinant() def testAdjugateMatrix(self): adjugate = createMatrix(3, 3, [47, -15, -19,", "self.assertEqual(0, z[1, 2]) self.assertEqual(0, z[2, 1]) self.assertEqual(0, z[2, 2]) def", "createMatrix(1, 2, [1, Ra(2, 3)]) self.assertEqual(div, c1 / 3) def", "Matrix(3, 2, [21, -12]+[1, -1]+[0, 0]) a5 = createMatrix(1, 2,", "class MatrixRingTest (unittest.TestCase): def setUp(self): self.m2z = MatrixRing.getInstance(2, Int) def", "2, 3]+[4, 5, 6]+[5, 7, 9]) d3 = Matrix(3, 3,", "= Subspace(3, 1, [1, 0, 0]) unit2 = Subspace(3, 2,", "0, 8, 9]+[0, 0, 1, 1]) assert UT.isUpperTriangularMatrix() assert not", "testSumOfSubspaces(self): unit1 = Subspace(3, 1, [1, 0, 0]) unit2 =", "3, [0, 1, 0, 0 ,0, 1, 0, 0, 1])", "a4.getColumn(2)) col2 = vector.Vector([1, 3]) self.assertEqual(col2, b1.getColumn(1)) def testTranspose(self): trans", "= createMatrix(2, 2, [0, 2, -2, 0]) assert alternate1.isAlternatingMatrix() alternate2", "3), (4, 5, 6), (7, 8, 9)], Q) self.assertTrue(mat3.row ==", "self.assertEqual(col1, a4.getColumn(2)) col2 = vector.Vector([1, 3]) self.assertEqual(col2, b1.getColumn(1)) def testTranspose(self):", "d6.column+1): self.assertEqual(d6[i], d5 * d5.solve(d6[i])[0]) sol1 = c1.solve(v2) for i", "self.assertFalse(self.m2z.issuperring(m2q)) self.assertFalse(self.m2z.issuperring(m3z)) # getCommonSuperring self.assertRaises(TypeError, self.m2z.getCommonSuperring, Int) class SubspaceTest(unittest.TestCase): def", "Subspace(3, 1, [1, 0, 0]) unit2 = Subspace(3, 2, [0,", "Q) mat8 = createMatrix(7) self.assertTrue(mat8 == 0) def suite(suffix=\"Test\"): suite", "[[2,3,4], [5,6,7]]) self.assertEqual(mat1.coeff_ring, Int) mat2 = createMatrix(2, 3, [[2,3,4], [5,6,7]],", "\"test\")) return suite if __name__ == '__main__': runner = unittest.TextTestRunner()", "square.column) hermite = createMatrix(3, 3, [0, 
1, 0, 0 ,0,", "[1, 2, 4, 0, 3, 5, 0, 0, 0]) b7", "2]+[2, 5]+[6, 7]) ## for FieldSquareMatrix d1 = createMatrix(2, 2,", "d3.inverse()) self.assertRaises(NoInverse, d2.inverse) self.assertEqual(d3.inverse() * c3, d3.inverse(c3)) def testInverseNoChange(self): #", "testTriangulate(self): triangle = createMatrix(3, 3, \\ [Ra(1, 1), 2, 3]+[0,", "Ra(5), Ra(-2)]+[7, 1, 9]) d4 = createMatrix(6, 6, \\ [Ra(4),", "0]) self.assertEqual(sub1, a4.subMatrix(2, 1)) sub2 = createMatrix(2, 2, [4, 5,", "9]+[0, 0, 1, 1]) assert UT.isUpperTriangularMatrix() assert not notUT.isUpperTriangularMatrix() def", "= rational.theRationalField mat1 = createMatrix(2, 3, [[2,3,4], [5,6,7]]) self.assertEqual(mat1.coeff_ring, Int)", "M_2) self.assertEqual(M_2, U_2 * b8 * V_2) class FieldMatrixTest(unittest.TestCase): def", "== 0) mat7 = createMatrix(3, Q) self.assertTrue(mat7.row == mat7.column) self.assertTrue(mat7", "testDeterminant(self): self.assertEqual(Ra(-7, 15), d7.determinant()) def testInverse(self): cinverse = createMatrix(3, 3)", "Q) self.assertTrue(mat7.row == mat7.column) self.assertTrue(mat7 == 0) self.assertEqual(mat7.coeff_ring, Q) mat8", "8])]) self.assertEqual(mat4.coeff_ring, Int) mat5 = createMatrix(5, 6, Int) self.assertTrue(mat5 ==", "testCharacteristicPolynomial(self): assert d1.characteristicPolynomial() == d1.characteristicMatrix().determinant() def testAdjugateMatrix(self): adjugate = createMatrix(3,", "self.assertEqual(M_2, U_2 * b8 * V_2) class FieldMatrixTest(unittest.TestCase): def testDiv(self):", "sol1 = c1.solve(v2) for i in range(len(sol1[1])): self.assertEqual(v2, c1 *", "[8, -4]) self.assertEqual(sum1, a1 + a2) sum2 = createMatrix(2, 2,", "2]) self.assertEqual(-2, b2[2, 2]) self.assertRaises(IndexError, a1.__getitem__, \"wrong\") self.assertEqual(vector.Vector([21, 1, 0]),", "= vector.Vector([0, 0]) self.assertEqual(zerovec, h.getColumn(1)) square = createMatrix(3, 3, [1,", "b3 * a4) def testScalarMul(self): mul = createMatrix(1, 2, [15,", "Matrix(2, 2, [0, -1]+[1, -2]) b3 = createMatrix(3, 3, [0,", "U_2 * b8 * V_2) class FieldMatrixTest(unittest.TestCase): def testDiv(self): div", "Ra(1, 5)]+[Ra(3, 2), Ra(1, 3), Ra(2, 5)]+[Ra(-1, 2), Ra(4, 3),", "v1) def testMod(self): mod1 = createMatrix(3, 2, [1, 2]+[0, 1]+[0,", "c2 * ker) def testImage(self): img = createMatrix(4,3,[1,2,-1]+[5,12,-2]+[1,3,-1]+[1,2,0]) self.assertEqual(img, c2.image())", "h) def testExtHermiteNormalForm(self): already = createMatrix(4, 3, [1, 0, 0,", "d5.solve(d6[i])[0]) sol1 = c1.solve(v2) for i in range(len(sol1[1])): self.assertEqual(v2, c1", "Int) m2q = MatrixRing.getInstance(2, rational.theRationalField) # issubring self.assertFalse(self.m2z.issubring(Int)) self.assertTrue(self.m2z.issubring(self.m2z)) self.assertTrue(self.m2z.issubring(m2q))", "U_1, V_1, M_1 = b5.extsmithNormalForm() self.assertEqual(smith1, M_1) self.assertEqual(M_1, U_1 *", "self.assertEqual(36, b3.determinant()) def testCofactor(self): self.assertEqual(-6, b5.cofactor(1, 2)) def testCommutator(self): commutator", "10]) a4 = Matrix(3, 2, [21, -12]+[1, -1]+[0, 0]) a5", "[1, 0, 0, 9, 1, 0, 5, 6, 1]) b8", "= createMatrix(2, 2, [Ra(1), Ra(2)]+[Ra(3), Ra(4)]) d2 = createMatrix(3, 3,", "d2 = createMatrix(3, 3, [Ra(1), 2, 3]+[4, 5, 6]+[5, 7,", "self.assertEqual(a4, lst_lst) lst_tuple = Matrix(3, 2, [(21, 1, 0), (-12,", "b6.isSingular() def testTrace(self): self.assertEqual(15, b4.trace()) def testDeterminant(self): self.assertEqual(-2, b1.determinant()) #sf.bug", "4) self.assertTrue(mat6 == 0) mat7 = createMatrix(3, Q) self.assertTrue(mat7.row ==", 
"range(1, d6.column+1): self.assertEqual(d6[i], d5 * d5.solve(d6[i])[0]) sol1 = c1.solve(v2) for", "8]+[3, -2]+[0, 10]) a4 = Matrix(3, 2, [21, -12]+[1, -1]+[0,", "nzmath.rational as rational import nzmath.poly.uniutil as uniutil Ra = rational.Rational", "for i in range(len(sol1[1])): self.assertEqual(v2, c1 * (sol1[0]+sol1[1][i])) self.assertRaises(NoInverseImage, c3.solve,", "* import nzmath.vector as vector import nzmath.rational as rational import", "Ra(1, 2), Ra(1, 1), Ra(-3, 2)]) self.assertEqual(M2, M1) def testHessenbergForm(self):", "sf bug#1849220 M1 = SquareMatrix(2, 2, [Ra(1, 2), Ra(1, 2),", "= createMatrix(2, 2, [7, 10, 15, 22]) self.assertEqual(pow1, b1 **", "mat2 = createMatrix(2, 3, [[2,3,4], [5,6,7]], Q) self.assertEqual(mat2.coeff_ring, Q) mat3", "vector.Vector([1, 4]) v2 = vector.Vector([8]) v3 = vector.Vector([0, 0, 1])", "1]+[0, 1]) self.assertEqual(mod1, a3 % 3) def testNeg(self): neg =", "Ra(-13, 86), Ra(-5, 86)]) self.assertEqual(cinverse, d3.inverse()) self.assertRaises(NoInverse, d2.inverse) self.assertEqual(d3.inverse() *", "b6 = createMatrix(3, 3, [1, 2, 4, 0, 3, 5,", "5, 6, 0]+[7, 8, 9, 10]) assert LT.isLowerTriangularMatrix() assert not", "alternate2 = createMatrix(2, [1, 2, -2, 0]) assert not alternate2.isAntisymmetricMatrix()", "\\ [Ra(1, 1), 2, 3]+[0, 5, -2]+[0, 0, Ra(-86, 5)])", "0)]) self.assertEqual(a4, lst_tuple) lst_vect = Matrix(3, 2, [vector.Vector([21, 1, 0]),", "unitMatrix(3) unit2.toSubspace() intersect = Subspace(3, 2, [-1, 0]+[0, -1]+[0, 0])", "assert L.isLowerTriangularMatrix() assert U.isUpperTriangularMatrix() class MatrixRingTest (unittest.TestCase): def setUp(self): self.m2z", "\\ [Ra(1, 2), Ra(2, 3), Ra(1, 5)]+[Ra(3, 2), Ra(1, 3),", "3), Ra(2, 5)]+[Ra(-1, 2), Ra(4, 3), Ra(3, 5)]) ## other", "square.row) self.assertEqual(h.column, square.column) hermite = createMatrix(3, 3, [0, 1, 0,", "[-12, 0]) self.assertEqual(sub1, a4.subMatrix(2, 1)) sub2 = createMatrix(2, 2, [4,", "mat7 = createMatrix(3, Q) self.assertTrue(mat7.row == mat7.column) self.assertTrue(mat7 == 0)", "createMatrix(6, 6, \\ [Ra(4), 2, 5, 0, 2, 1]+[5, 1,", "2, -2, 0]) assert not alternate2.isAntisymmetricMatrix() def testIsSingular(self): assert b6.isSingular()", "[7, 8]+[3, -2]+[0, 10]) a4 = Matrix(3, 2, [21, -12]+[1,", "= createMatrix(3, Q) self.assertTrue(mat7.row == mat7.column) self.assertTrue(mat7 == 0) self.assertEqual(mat7.coeff_ring,", "= createMatrix(4, 5, \\ [Ra(0), 0, 1, 2, -1]+[0, 0,", "self.assertEqual(mat4.coeff_ring, Int) mat5 = createMatrix(5, 6, Int) self.assertTrue(mat5 == 0)", "b2) def testMul(self): mul1 = createMatrix(1, 2, [2, -7]) self.assertEqual(mul1,", "createMatrix(1, 2, [9, 4]) self.assertEqual(pow_two, a1.map(lambda n : n **", "assert alternate1.isAlternatingMatrix() alternate2 = createMatrix(2, [1, 2, -2, 0]) assert", "Ra(1, 1), Ra(-3, 2)]) M1.inverse() M2 = SquareMatrix(2, 2, [Ra(1,", "self.assertEqual(3, c2.rank()) self.assertEqual(3, d3.rank()) def testInverseImage(self): self.assertEqual(d6, d5 * d5.inverseImage(d6))", "# sub test try: from test.testMatrixFiniteField import * except: try:", "o[1, 1]) self.assertEqual(0, o[1, 2]) self.assertEqual(0, o[2, 1]) self.assertEqual(1, o[2,", "self.assertEqual(a4, lst_tuple) lst_vect = Matrix(3, 2, [vector.Vector([21, 1, 0]), vector.Vector([-12,", "0, 0, 5]) assert diag.isDiagonalMatrix() def testIsScalarMatrix(self): scaler = createMatrix(2,", "def testIsUpperTriangularMatrix(self): UT = createMatrix(4, 4, \\ [1, 2, 3,", "echelon = createMatrix(4, 5,\\ [Ra(0), 0, 1, 0, 0]+[0, 0,", "[7, 3, 0]+[8, -2, 
10]) self.assertEqual(trans, a3.transpose()) def testGetBlock(self): block", "notLT = createMatrix(4, 4, \\ [1, 0, 0, 0]+[2, 3,", "0, 1, 0, 0, 1]) h = already.hermiteNormalForm() self.assertEqual(h, already)", "self.assertEqual(-2, b1.determinant()) #sf.bug #1914349 self.assertTrue(isinstance(b3.determinant(), int)) self.assertEqual(36, b3.determinant()) def testCofactor(self):", "b2[2, 2]) self.assertRaises(IndexError, a1.__getitem__, \"wrong\") self.assertEqual(vector.Vector([21, 1, 0]), a4[1]) def", "b5 * V_1) smith2 = Matrix(3, 3, [9, 0, 0,", "self.assertFalse(self.m2z.issubring(m3z)) # issuperring self.assertFalse(self.m2z.issuperring(Int)) self.assertTrue(self.m2z.issuperring(self.m2z)) self.assertFalse(self.m2z.issuperring(m2q)) self.assertFalse(self.m2z.issuperring(m3z)) # getCommonSuperring self.assertRaises(TypeError,", "a1, bool)) def testNonZero(self): self.assertTrue(not zeroMatrix(2, 3)) def testContains(self): self.assertTrue(5", "2, 4, 6, 5, 6, 8, 9]) b6 = createMatrix(3,", "2, [0, 0]+[1, 0]+[0, 1]) self.assertEqual(unitMatrix(3), unit1.sumOfSubspaces(unit2)) def testIntersectionOfSubspace(self): unit1", "v1 = vector.Vector([1, 4]) v2 = vector.Vector([8]) v3 = vector.Vector([0,", ",0, 1, 0, 0, 1]) self.assertEqual(hermite, h) def testExtHermiteNormalForm(self): already", "[1, 2]+[0, 1]+[0, 1]) self.assertEqual(mod1, a3 % 3) def testNeg(self):", "for i in range(1, d6.column+1): self.assertEqual(d6[i], d5 * d5.solve(d6[i])[0]) sol1", "2, [2, -7]) self.assertEqual(mul1, a1 * b2) mul2 = createMatrix(3,", "unit1 = Subspace(3, 1, [1, 0, 0]) unit2 = Subspace(3,", "-1], [0, 0]]) self.assertEqual(a4, lst_lst) lst_tuple = Matrix(3, 2, [(21,", "0, 0, 10]) assert scaler.isScalarMatrix() def testIsSymmetricMatrix(self): symmetric = createMatrix(2,", "[Ra(1, 2), Ra(1, 2), Ra(1, 1), Ra(-3, 2)]) M1.inverse() M2", "8]) b4 = Matrix(3, 3, [1, 2, 3]+[0, 5, -2]+[7,", "3, 0, 0]+[4, 5, 6, 0]+[7, 8, 9, 10]) notLT", "5, -4, 2]+[4, 1, 5, 6, 3, 1]) d5 =", "[10, 0, 0, 10]) assert scaler.isScalarMatrix() def testIsSymmetricMatrix(self): symmetric =", "createMatrix(2, 2, [1, 0, 0, 1]) self.assertEqual(pow2, b2 ** 0)", "b1.characteristicMatrix()) def testCharacteristicPolynomial(self): assert d1.characteristicPolynomial() == d1.characteristicMatrix().determinant() def testAdjugateMatrix(self): adjugate", "self.assertEqual(sum1, a1 + a2) sum2 = createMatrix(2, 2, [1, 1,", "3, [[2,3,4], [5,6,7]], Q) self.assertEqual(mat2.coeff_ring, Q) mat3 = createMatrix(3, [(1,", "SquareMatrix(2, 2, [Ra(1, 2), Ra(1, 2), Ra(1, 1), Ra(-3, 2)])", "\\ [1, 0, 0, 0]+[2, 3, 1, 0]+[4, 5, 6,", "UT = createMatrix(4, 4, \\ [1, 2, 3, 4]+[0, 5,", "4, \\ [1, 0, 0, 0]+[2, 3, 0, 0]+[4, 5,", "6), (7, 8, 9)], Q) self.assertTrue(mat3.row == mat3.column) self.assertTrue(mat3.__class__, FieldSquareMatrix)", "L, U = d4.LUDecomposition() assert L * U == d4", "2])) self.assertTrue(isinstance(a1 == a1, bool)) def testNonZero(self): self.assertTrue(not zeroMatrix(2, 3))", "c2.kernel() self.assertTrue(not c2 * ker) def testImage(self): img = createMatrix(4,3,[1,2,-1]+[5,12,-2]+[1,3,-1]+[1,2,0])", "43), Ra(-1, 43)]+[Ra(35, 86), Ra(-13, 86), Ra(-5, 86)]) self.assertEqual(cinverse, d3.inverse())", "try: from test.testMatrixFiniteField import * except: try: from nzmath.test.testMatrixFiniteField import", "self.assertEqual(mul, 5 * a1) def testVectorMul(self): mul = vector.Vector([9, 19])", "orthogonal.isOrthogonalMatrix() def testIsAlternatingMatrix(self): alternate1 = createMatrix(2, 2, [0, 2, -2,", "6]+[5, 7, 9]) d3 = Matrix(3, 3, \\ [Ra(1), 
Ra(2),", "86), Ra(19, 86)]+\\ [Ra(7, 43), Ra(6, 43), Ra(-1, 43)]+[Ra(35, 86),", "3, 5, 0, 0, 0]) b7 = createMatrix(3, 3, [1,", "of one. \"\"\" self.assertEqual(self.m2z.one, self.m2z.unitMatrix()) def testRingAPI(self): m3z = MatrixRing.getInstance(3,", "pow2 = createMatrix(2, 2, [1, 0, 0, 1]) self.assertEqual(pow2, b2", "def testIsDiagonalMatrix(self): diag = createMatrix(2, 2, [-3, 0, 0, 5])", "def suite(suffix=\"Test\"): suite = unittest.TestSuite() all_names = globals() for name", "createMatrix(2, 2, [5, -1, 9, -5]) self.assertEqual(commutator, b1.commutator(b2)) def testCharacteristicMatrix(self):", "def testIsAlternatingMatrix(self): alternate1 = createMatrix(2, 2, [0, 2, -2, 0])", "createMatrix(3, 2, [7, 8]+[3, -2]+[0, 10]) a4 = Matrix(3, 2,", "self.assertTrue(isinstance(b3.determinant(), int)) self.assertEqual(36, b3.determinant()) def testCofactor(self): self.assertEqual(-6, b5.cofactor(1, 2)) def", "1, 0, 0]+[0, 0, 0, 2, 3]+[0, 0, 0, 1,", "0, 0, 0]+[2, 3, 0, 0]+[4, 5, 6, 0]+[7, 8,", "2]) d6 = createMatrix(4, 4, \\ [Ra(1), 2, 3, 4]+[2,", "LT = createMatrix(4, 4, \\ [1, 0, 0, 0]+[2, 3,", "z[1, 1]) self.assertEqual(0, z[1, 2]) self.assertEqual(0, z[2, 1]) self.assertEqual(0, z[2,", "4, 5, 7]) supbase = createMatrix(3, 3, [1, 2, 0,", "-6]+[-2, -2]+[0, 0]) self.assertEqual(mul2, a4 * b1) mul3 = createMatrix(3,", "10]) assert scaler.isScalarMatrix() def testIsSymmetricMatrix(self): symmetric = createMatrix(2, 2, [2,", "self.assertEqual(mat1.coeff_ring, Int) mat2 = createMatrix(2, 3, [[2,3,4], [5,6,7]], Q) self.assertEqual(mat2.coeff_ring,", "2, \\ [Poly({0:-1,1:1}, Int), Poly({0:-2}, Int)]+[Poly({0:-3}, Int), Poly({0:-4,1:1}, Int)]) self.assertEqual(charMat,", "self.assertEqual(-2, a3.reduce(min)) def testGetRow(self): row1 = vector.Vector([3, -2]) self.assertEqual(row1, a3.getRow(2))", "2, [3, 2])) self.assertTrue(isinstance(a1 == a1, bool)) def testNonZero(self): self.assertTrue(not", "Matrix(3, 2, [vector.Vector([21, 1, 0]), vector.Vector([-12, -1, 0])]) self.assertEqual(a4, lst_vect)", "[1, 3])) class SquareMatrixTest(unittest.TestCase): def testIsUpperTriangularMatrix(self): UT = createMatrix(4, 4,", "= createMatrix(2, 3, [1, 0, 0, 0, 1, 0]) U_2,", "for name in all_names: if name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name], \"test\")) return suite", "0]+[0, 0, 0, 0, 1]) self.assertEqual(echelon, c2.columnEchelonForm()) class FieldSquareMatrixTest(unittest.TestCase): def", "2, 6, 5, -4, 2]+[4, 1, 5, 6, 3, 1])", "createMatrix(2, [1, 2, -2, 0]) assert not alternate2.isAntisymmetricMatrix() def testIsSingular(self):", "Int), Poly({0:-2}, Int)]+[Poly({0:-3}, Int), Poly({0:-4,1:1}, Int)]) self.assertEqual(charMat, b1.characteristicMatrix()) def testCharacteristicPolynomial(self):", "2, [0, 2, -2, 0]) assert alternate1.isAlternatingMatrix() alternate2 = createMatrix(2,", "name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name], \"test\")) return suite if __name__ == '__main__': runner", "6]+[4, 5, 6, 7]) d7 = Matrix(3, 3, \\ [Ra(1,", "lst_vect = Matrix(3, 2, [vector.Vector([21, 1, 0]), vector.Vector([-12, -1, 0])])", "43)]+[Ra(35, 86), Ra(-13, 86), Ra(-5, 86)]) self.assertEqual(cinverse, d3.inverse()) self.assertRaises(NoInverse, d2.inverse)", "testSupplementBasis(self): ba = Subspace(3, 2, [1, 2, 3, 4, 5,", "4]), vector.Vector([6, 8])]) self.assertEqual(mat4.coeff_ring, Int) mat5 = createMatrix(5, 6, Int)", "[0, 1, 2]+[5, 4, 6]+[7, 9, 8]) b4 = Matrix(3,", "= createMatrix(3, 3, [1, 0, 0, 9, 1, 0, 5,", "Ra(1, 3), Ra(2, 5)]+[Ra(-1, 2), Ra(4, 3), Ra(3, 
5)]) ##", "0]+[7, 8, 9, 10]) notLT = createMatrix(4, 4, \\ [1,", "(-2)) def testTriangulate(self): triangle = createMatrix(3, 3, \\ [Ra(1, 1),", "0, 0, 0]+[2, 3, 1, 0]+[4, 5, 6, 0]+[7, 8,", "2, -1, 0]+[0, -1, 2, -1]+[0, 0, -1, 2]) d6", "0, 1]) class MatrixTest(unittest.TestCase): def testInit(self): lst_lst = Matrix(3, 2,", "# getCommonSuperring self.assertRaises(TypeError, self.m2z.getCommonSuperring, Int) class SubspaceTest(unittest.TestCase): def testSupplementBasis(self): ba", "7, 1]) self.assertEqual(supbase, ba.supplementBasis()) def testSumOfSubspaces(self): unit1 = Subspace(3, 1,", "Q) self.assertTrue(mat3.row == mat3.column) self.assertTrue(mat3.__class__, FieldSquareMatrix) mat4 = createMatrix(2, [vector.Vector([1,", "a1 + a2) sum2 = createMatrix(2, 2, [1, 1, 4,", "def testRank(self): self.assertEqual(3, c2.rank()) self.assertEqual(3, d3.rank()) def testInverseImage(self): self.assertEqual(d6, d5", "d5 = createMatrix(4, 4, \\ [Ra(2), -1, 0, 0]+[-1, 2,", "b2) mul2 = createMatrix(3, 2, [-15, -6]+[-2, -2]+[0, 0]) self.assertEqual(mul2,", "V_2, M_2 = b8.extsmithNormalForm() self.assertEqual(smith2, M_2) self.assertEqual(M_2, U_2 * b8", "Int), Poly({1:2}, Int)]) ## for RingSquareMatrix b1 = createMatrix(2, 2,", "def testUnitMatrix(self): \"\"\" unitMatrix() is an alias of one. \"\"\"", "d5 * d5.solve(d6[i])[0]) sol1 = c1.solve(v2) for i in range(len(sol1[1])):", "4]) b2 = Matrix(2, 2, [0, -1]+[1, -2]) b3 =", "6]+\\ [7, 5, 0, 8, 2, 5]+[8, 2, 6, 5,", "[5, -6]) a3 = createMatrix(3, 2, [7, 8]+[3, -2]+[0, 10])", "in a2) def testCall(self): call = createMatrix(1, 2, [13, 4])", "notLT.isLowerTriangularMatrix() def testIsDiagonalMatrix(self): diag = createMatrix(2, 2, [-3, 0, 0,", "testInverseNoChange(self): # sf bug#1849220 M1 = SquareMatrix(2, 2, [Ra(1, 2),", "[1, 0]+[0, 1]+[0, 0]) unit2 = unitMatrix(3) unit2.toSubspace() intersect =", "9, 10]) assert LT.isLowerTriangularMatrix() assert not notLT.isLowerTriangularMatrix() def testIsDiagonalMatrix(self): diag", "= createMatrix(1, 2, [8, -4]) self.assertEqual(sum1, a1 + a2) sum2", "0, 0]+[-1, 2, -1, 0]+[0, -1, 2, -1]+[0, 0, -1,", "3, 4]+[0, 5, 6, 7]+[0, 0, 8, 9]+[0, 0, 0,", "-2, 0]) assert alternate1.isAlternatingMatrix() alternate2 = createMatrix(2, [1, 2, -2,", "scaler.isScalarMatrix() def testIsSymmetricMatrix(self): symmetric = createMatrix(2, 2, [2, 3, 3,", "-2]+[0, 0]) self.assertEqual(mul2, a4 * b1) mul3 = createMatrix(3, 2,", "h = already.hermiteNormalForm() self.assertEqual(h, already) lessrank = createMatrix(2, 3, [1,", "[Ra(1), Ra(2)]+[Ra(3), Ra(4)]) d2 = createMatrix(3, 3, [Ra(1), 2, 3]+[4,", "Ra(3, 5)]) assert orthogonal.isOrthogonalMatrix() def testIsAlternatingMatrix(self): alternate1 = createMatrix(2, 2,", "8, 4, 6]+\\ [7, 5, 0, 8, 2, 5]+[8, 2,", "Q) self.assertEqual(mat2.coeff_ring, Q) mat3 = createMatrix(3, [(1, 2, 3), (4,", "1, 0, 0, 1]) self.assertEqual(hermite, h) def testExtHermiteNormalForm(self): already =", "5, 6]+[5, 7, 9]) d3 = Matrix(3, 3, \\ [Ra(1),", "def testInit(self): lst_lst = Matrix(3, 2, [[21, -12], [1, -1],", "0, 0, 0, 1]) U_2, V_2, M_2 = b8.extsmithNormalForm() self.assertEqual(smith2,", "pass def testLUDecomposition(self): L, U = d4.LUDecomposition() assert L *", "[Ra(1), 2, 3, 4]+[2, 3, 4, 5]+[3, 4, 5, 6]+[4,", "Ra(-4, 5), Ra(3, 5)]) assert orthogonal.isOrthogonalMatrix() def testIsAlternatingMatrix(self): alternate1 =", "def testHermiteNormalForm(self): already = createMatrix(4, 3, [1, 0, 0, 0,", "testCofactor(self): self.assertEqual(-6, b5.cofactor(1, 2)) def testCommutator(self): commutator = 
createMatrix(2, 2,", "0, Ra(-86, 5)]) self.assertEqual(triangle, d3.triangulate()) def testDeterminant(self): self.assertEqual(Ra(-7, 15), d7.determinant())", "createMatrix(4,3,[1,2,-1]+[5,12,-2]+[1,3,-1]+[1,2,0]) self.assertEqual(img, c2.image()) def testRank(self): self.assertEqual(3, c2.rank()) self.assertEqual(3, d3.rank()) def", "1]+[5, 1, 2, 5, 1, 1]+[90, 7, 54, 8, 4,", "self.assertEqual(intersect, unit1.intersectionOfSubspaces(unit2)) class FunctionTest(unittest.TestCase): def testCreateMatrix(self): Q = rational.theRationalField mat1", "[[2,3,4], [5,6,7]], Q) self.assertEqual(mat2.coeff_ring, Q) mat3 = createMatrix(3, [(1, 2,", "a1) def testVectorMul(self): mul = vector.Vector([9, 19]) self.assertEqual(mul, b1 *", "3) def testNeg(self): neg = createMatrix(2, 2, [0, 1, -1,", "as rational import nzmath.poly.uniutil as uniutil Ra = rational.Rational Poly", "3], [1, 3])) class SquareMatrixTest(unittest.TestCase): def testIsUpperTriangularMatrix(self): UT = createMatrix(4,", "[1, 0, 0, 1]) self.assertEqual(pow2, b2 ** 0) def testIsOrthogonalMatrix(self):", "already) self.assertEqual(already * U_1, h_1) lessrank = createMatrix(2, 3, [1,", "5), Ra(4, 5), Ra(-4, 5), Ra(3, 5)]) assert orthogonal.isOrthogonalMatrix() def", "0, 0, 1, 0, 0, 1]) U_1, h_1 = already.exthermiteNormalForm()", "FunctionTest(unittest.TestCase): def testCreateMatrix(self): Q = rational.theRationalField mat1 = createMatrix(2, 3,", "0, 8, 2, 5]+[8, 2, 6, 5, -4, 2]+[4, 1,", "19]) self.assertEqual(mul, b1 * v1) def testMod(self): mod1 = createMatrix(3,", "[1, -1]+[109, -64]+[156, -93]) self.assertEqual(mul3, b3 * a4) def testScalarMul(self):", "a3 = createMatrix(3, 2, [7, 8]+[3, -2]+[0, 10]) a4 =", "nzmath.vector as vector import nzmath.rational as rational import nzmath.poly.uniutil as", "a4 * b1) mul3 = createMatrix(3, 2, [1, -1]+[109, -64]+[156,", "assert diag.isDiagonalMatrix() def testIsScalarMatrix(self): scaler = createMatrix(2, 2, [10, 0,", "a5 = createMatrix(1, 2, [Poly({0:3, 1:5}, Int), Poly({1:2}, Int)]) ##", "Matrix(3, 3, [9, 0, 0, 0, 3, 0, 0, 0,", "7]+[0, 0, 8, 9]+[0, 0, 1, 1]) assert UT.isUpperTriangularMatrix() assert", "3, \\ [Ra(1, 2), Ra(2, 3), Ra(1, 5)]+[Ra(3, 2), Ra(1,", "Ra(2, 5)]+[Ra(-1, 2), Ra(4, 3), Ra(3, 5)]) ## other objects", "self.assertEqual(h_2.column, lessrank.column) self.assertEqual(lessrank * U_2, h_2) def testKernelAsModule(self): ker_1 =", "0]) self.assertEqual(zerovec, h.getColumn(1)) square = createMatrix(3, 3, [1, 0, 0,", "= createMatrix(3, 2, [1, 2]+[0, 1]+[0, 1]) self.assertEqual(mod1, a3 %", "= Matrix(2, 2, [0, -1]+[1, -2]) b3 = createMatrix(3, 3,", "3, 4]+[2, 3, 4, 5]+[3, 4, 5, 6]+[4, 5, 6,", "[0, 1, 0, 0 ,0, 1, 0, 0, 1]) self.assertEqual(hermite,", "vector.Vector([8]) v3 = vector.Vector([0, 0, 1]) class MatrixTest(unittest.TestCase): def testInit(self):", "c2.rank()) self.assertEqual(3, d3.rank()) def testInverseImage(self): self.assertEqual(d6, d5 * d5.inverseImage(d6)) self.assertRaises(NoInverseImage,", "self.assertEqual(zerovec, h.getColumn(1)) square = createMatrix(3, 3, [1, 0, 0, 0,", "2]) self.assertEqual(0, o[2, 1]) self.assertEqual(1, o[2, 2]) def testUnitMatrix(self): \"\"\"", "5]) assert diag.isDiagonalMatrix() def testIsScalarMatrix(self): scaler = createMatrix(2, 2, [10,", "self.assertEqual(sum2, b1 + b2) def testSub(self): sub1 = createMatrix(1, 2,", "self.assertEqual([1, 1, 1], b7.smithNormalForm()) self.assertEqual([9, 3, 1], b8.smithNormalForm()) def testExtSmithNormalForm(self):", "rational.theRationalField) # issubring self.assertFalse(self.m2z.issubring(Int)) 
self.assertTrue(self.m2z.issubring(self.m2z)) self.assertTrue(self.m2z.issubring(m2q)) self.assertFalse(self.m2z.issubring(m3z)) # issuperring self.assertFalse(self.m2z.issuperring(Int))", "c2 = createMatrix(4, 5, \\ [Ra(0), 0, 1, 2, -1]+[0,", "= createMatrix(3, 2, [1, -1]+[109, -64]+[156, -93]) self.assertEqual(mul3, b3 *", "= createMatrix(3, 3, [0, 1, 2]+[5, 4, 6]+[7, 9, 8])", "Matrix(3, 2, [[21, -12], [1, -1], [0, 0]]) self.assertEqual(a4, lst_lst)", "Matrix(3, 3, [12, 0, 0, 0, 1, 0, 0, 0,", "## for FieldMatrix c1 = createMatrix(1, 2, [Ra(3), Ra(2)]) c2", "self.assertEqual(img, c2.image()) def testRank(self): self.assertEqual(3, c2.rank()) self.assertEqual(3, d3.rank()) def testInverseImage(self):", ".testMatrixFiniteField import * ## for RingMatrix a1 = createMatrix(1, 2,", "1]) notUT = createMatrix(4, 4, \\ [1, 2, 3, 4]+[0,", "-1]+[109, -64]+[156, -93]) self.assertEqual(mul3, b3 * a4) def testScalarMul(self): mul", "= b1.kernelAsModule() self.assertEqual(ker_2, None) class RingSquareMatrixTest(unittest.TestCase): def testPow(self): pow1 =", "86), Ra(-5, 86)]) self.assertEqual(cinverse, d3.inverse()) self.assertRaises(NoInverse, d2.inverse) self.assertEqual(d3.inverse() * c3,", "2), Ra(4, 3), Ra(3, 5)]) ## other objects v1 =", "[12, 0, 0, 0, 1, 0, 0, 0, 1]) U_1,", "0, 0]+[0, 0, 0, 2, 3]+[0, 0, 0, 1, 0]+[0,", "1], b5.smithNormalForm()) self.assertRaises(ValueError, b6.smithNormalForm) self.assertEqual([1, 1, 1], b7.smithNormalForm()) self.assertEqual([9, 3,", "Int) mat2 = createMatrix(2, 3, [[2,3,4], [5,6,7]], Q) self.assertEqual(mat2.coeff_ring, Q)", "0, 1, 0, 0, 0, 1, 0, 0, 1]) h", "V_2) class FieldMatrixTest(unittest.TestCase): def testDiv(self): div = createMatrix(1, 2, [1,", "self.assertEqual(mul, b1 * v1) def testMod(self): mod1 = createMatrix(3, 2,", "= createMatrix(3, 2, [7, 8]+[3, -2]+[0, 10]) a4 = Matrix(3,", "createMatrix(2, [vector.Vector([1, 4]), vector.Vector([6, 8])]) self.assertEqual(mat4.coeff_ring, Int) mat5 = createMatrix(5,", "self.assertEqual(0, z[2, 2]) def testOne(self): o = self.m2z.one self.assertEqual(1, o[1,", "0]]) self.assertEqual(a4, lst_lst) lst_tuple = Matrix(3, 2, [(21, 1, 0),", "1]) U_1, V_1, M_1 = b5.extsmithNormalForm() self.assertEqual(smith1, M_1) self.assertEqual(M_1, U_1", "b5.smithNormalForm()) self.assertRaises(ValueError, b6.smithNormalForm) self.assertEqual([1, 1, 1], b7.smithNormalForm()) self.assertEqual([9, 3, 1],", "8, 9)], Q) self.assertTrue(mat3.row == mat3.column) self.assertTrue(mat3.__class__, FieldSquareMatrix) mat4 =", "createMatrix(3, 3, [0, 1, 2]+[5, 4, 6]+[7, 9, 8]) b4", "lst_lst = Matrix(3, 2, [[21, -12], [1, -1], [0, 0]])", "def testInverse(self): cinverse = createMatrix(3, 3) cinverse.set([Ra(-47, 86), Ra(15, 86),", "self.assertEqual(sub1, a4.subMatrix(2, 1)) sub2 = createMatrix(2, 2, [4, 5, 6,", "sub2 = createMatrix(2, 2, [4, 5, 6, 9]) self.assertEqual(sub2, b5.subMatrix([2,", "Ra(4, 3), Ra(3, 5)]) ## other objects v1 = vector.Vector([1,", "4, \\ [1, 2, 3, 4]+[0, 5, 6, 7]+[0, 0,", "0) def suite(suffix=\"Test\"): suite = unittest.TestSuite() all_names = globals() for", "createMatrix(1, 2, [Poly({0:3, 1:5}, Int), Poly({1:2}, Int)]) ## for RingSquareMatrix", "c1 / 3) def testKernel(self): ker = c2.kernel() self.assertTrue(not c2", "-1]+[0, 0, -1, 2]) d6 = createMatrix(4, 4, \\ [Ra(1),", "5]+[3, 4, 5, 6]+[4, 5, 6, 7]) d7 = Matrix(3,", "unittest.TestSuite() all_names = globals() for name in all_names: if name.endswith(suffix):", "z[2, 2]) def testOne(self): o = self.m2z.one self.assertEqual(1, o[1, 1])", "1]) self.assertEqual(0, 
o[1, 2]) self.assertEqual(0, o[2, 1]) self.assertEqual(1, o[2, 2])", "2, [-2, 8]) self.assertEqual(sub1, a1 - a2) sub2 = createMatrix(2,", "= Subspace(3, 2, [1, 0]+[0, 1]+[0, 0]) unit2 = unitMatrix(3)", "0, 1, 0]) U_2, h_2 = lessrank.exthermiteNormalForm() self.assertEqual(h_2.row, lessrank.row) self.assertEqual(h_2.column,", "[Ra(3, 5), Ra(4, 5), Ra(-4, 5), Ra(3, 5)]) assert orthogonal.isOrthogonalMatrix()", "0, 1, 0, 0, 0, 1, 0, 0, 1]) U_1,", "b7 = createMatrix(3, 3, [1, 0, 0, 9, 1, 0,", "def testTranspose(self): trans = createMatrix(2, 3, [7, 3, 0]+[8, -2,", "0, 1, 0]) h = lessrank.hermiteNormalForm() self.assertEqual(h.row, lessrank.row) self.assertEqual(h.column, lessrank.column)", "U_1, h_1) lessrank = createMatrix(2, 3, [1, 0, 0, 0,", "6, 9]) self.assertEqual(sub2, b5.subMatrix([2, 3], [1, 3])) class SquareMatrixTest(unittest.TestCase): def", "getCommonSuperring self.assertRaises(TypeError, self.m2z.getCommonSuperring, Int) class SubspaceTest(unittest.TestCase): def testSupplementBasis(self): ba =", "0, 0, 2, 3]+[0, 0, 0, 1, 0]+[0, 0, 0,", "def testGetitem(self): self.assertEqual(2, a1[1, 2]) self.assertEqual(-2, b2[2, 2]) self.assertRaises(IndexError, a1.__getitem__,", "testReduce(self): self.assertEqual(-2, a3.reduce(min)) def testGetRow(self): row1 = vector.Vector([3, -2]) self.assertEqual(row1,", "1, 0, 5, 6, 1]) b8 = Matrix(3, 3, [3,", "(-12, -1, 0)]) self.assertEqual(a4, lst_tuple) lst_vect = Matrix(3, 2, [vector.Vector([21,", "mat4 = createMatrix(2, [vector.Vector([1, 4]), vector.Vector([6, 8])]) self.assertEqual(mat4.coeff_ring, Int) mat5", "U.isUpperTriangularMatrix() class MatrixRingTest (unittest.TestCase): def setUp(self): self.m2z = MatrixRing.getInstance(2, Int)", "vector.Vector([9, 19]) self.assertEqual(mul, b1 * v1) def testMod(self): mod1 =", "self.assertTrue(mat7.row == mat7.column) self.assertTrue(mat7 == 0) self.assertEqual(mat7.coeff_ring, Q) mat8 =", "def setUp(self): self.m2z = MatrixRing.getInstance(2, Int) def testZero(self): z =", "3, 0]+[8, -2, 10]) self.assertEqual(trans, a3.transpose()) def testGetBlock(self): block =", "15, 22]) self.assertEqual(pow1, b1 ** 2) pow2 = createMatrix(2, 2,", "uniutil Ra = rational.Rational Poly = uniutil.polynomial Int = rational.theIntegerRing", "createMatrix(2, 2, [0, 2, -2, 0]) assert alternate1.isAlternatingMatrix() alternate2 =", "= createMatrix(2, 3, [1, 0, 0, 0, 1, 0]) h", "[1, 2, 3, 4, 5, 7]) supbase = createMatrix(3, 3,", "* U_1, h_1) lessrank = createMatrix(2, 3, [1, 0, 0,", "86)]+\\ [Ra(7, 43), Ra(6, 43), Ra(-1, 43)]+[Ra(35, 86), Ra(-13, 86),", "-12, 2, -35, 13, 5]) self.assertEqual(adjugate, b4.adjugateMatrix()) assert d1 *", "self.assertEqual([9, 3, 1], b8.smithNormalForm()) def testExtSmithNormalForm(self): smith1 = Matrix(3, 3,", "3), Ra(3, 5)]) ## other objects v1 = vector.Vector([1, 4])", "3)) def testSubMatrix(self): sub1 = createMatrix(2, 1, [-12, 0]) self.assertEqual(sub1,", "0]+[0, 1]+[0, 0]) unit2 = unitMatrix(3) unit2.toSubspace() intersect = Subspace(3,", "[1, 2, 0, 3, 4, 0, 5, 7, 1]) self.assertEqual(supbase,", "unit2 = Subspace(3, 2, [0, 0]+[1, 0]+[0, 1]) self.assertEqual(unitMatrix(3), unit1.sumOfSubspaces(unit2))", "b2 = Matrix(2, 2, [0, -1]+[1, -2]) b3 = createMatrix(3,", "import nzmath.vector as vector import nzmath.rational as rational import nzmath.poly.uniutil", "-1, 2]) self.assertEqual(neg, -b2) def testHermiteNormalForm(self): already = createMatrix(4, 3,", "nzmath.test.testMatrixFiniteField import * except: from .testMatrixFiniteField import * ## for", "1), Ra(-3, 2)]) 
self.assertEqual(M2, M1) def testHessenbergForm(self): pass def testLUDecomposition(self):", "self.assertEqual(0, z[2, 1]) self.assertEqual(0, z[2, 2]) def testOne(self): o =", "class SquareMatrixTest(unittest.TestCase): def testIsUpperTriangularMatrix(self): UT = createMatrix(4, 4, \\ [1,", "b1 = createMatrix(2, 2, [1, 2]+[3, 4]) b2 = Matrix(2,", "d1 * d1.adjugateMatrix() == d1.determinant() * unitMatrix(d1.row) def testCofactorMatrix(self): cofact", "4)]) self.assertEqual(pow3, d1 ** (-2)) def testTriangulate(self): triangle = createMatrix(3,", "8, 9]) b6 = createMatrix(3, 3, [1, 2, 4, 0,", "self.assertEqual(0, o[2, 1]) self.assertEqual(1, o[2, 2]) def testUnitMatrix(self): \"\"\" unitMatrix()", "[-3, 0, 0, 5]) assert diag.isDiagonalMatrix() def testIsScalarMatrix(self): scaler =", "c3 = createMatrix(3, 2, [Ra(1), 2]+[2, 5]+[6, 7]) ## for", "0]+[-1, 2, -1, 0]+[0, -1, 2, -1]+[0, 0, -1, 2])", "2, [Ra(1), 2]+[2, 5]+[6, 7]) ## for FieldSquareMatrix d1 =", "2, [Ra(1), Ra(2)]+[Ra(3), Ra(4)]) d2 = createMatrix(3, 3, [Ra(1), 2,", "-6]) a3 = createMatrix(3, 2, [7, 8]+[3, -2]+[0, 10]) a4", "1, 1]+[90, 7, 54, 8, 4, 6]+\\ [7, 5, 0,", "self.assertEqual([12, 1, 1], b5.smithNormalForm()) self.assertRaises(ValueError, b6.smithNormalForm) self.assertEqual([1, 1, 1], b7.smithNormalForm())", "4]) self.assertEqual(call, a5(2)) def testMap(self): pow_two = createMatrix(1, 2, [9,", "= square.hermiteNormalForm() self.assertEqual(h.row, square.row) self.assertEqual(h.column, square.column) hermite = createMatrix(3, 3,", "= createMatrix(4,3,[1,2,-1]+[5,12,-2]+[1,3,-1]+[1,2,0]) self.assertEqual(img, c2.image()) def testRank(self): self.assertEqual(3, c2.rank()) self.assertEqual(3, d3.rank())", "class RingSquareMatrixTest(unittest.TestCase): def testPow(self): pow1 = createMatrix(2, 2, [7, 10,", "5, 0, 2, 1]+[5, 1, 2, 5, 1, 1]+[90, 7,", "V_1) smith2 = Matrix(3, 3, [9, 0, 0, 0, 3,", "d2.inverse) self.assertEqual(d3.inverse() * c3, d3.inverse(c3)) def testInverseNoChange(self): # sf bug#1849220", "\\ [1, 2, 3, 4]+[0, 5, 6, 7]+[0, 0, 8,", "* ## for RingMatrix a1 = createMatrix(1, 2, [3, 2])", "= d4.LUDecomposition() assert L * U == d4 assert L.isLowerTriangularMatrix()", "3, 2, 6]) self.assertEqual(sub2, b1 - b2) def testMul(self): mul1", "= createMatrix(1, 2, [1, Ra(2, 3)]) self.assertEqual(div, c1 / 3)", "## for RingSquareMatrix b1 = createMatrix(2, 2, [1, 2]+[3, 4])", "3]) self.assertEqual(col2, b1.getColumn(1)) def testTranspose(self): trans = createMatrix(2, 3, [7,", "10]) self.assertEqual(trans, a3.transpose()) def testGetBlock(self): block = Matrix(2, 3, [4,", "3, [1, 2, 0, 3, 4, 0, 5, 7, 1])", "an alias of one. 
\"\"\" self.assertEqual(self.m2z.one, self.m2z.unitMatrix()) def testRingAPI(self): m3z", "self.assertEqual(pow2, b2 ** 0) def testIsOrthogonalMatrix(self): orthogonal = createMatrix(2, 2,", "= createMatrix(1, 2, [Ra(3), Ra(2)]) c2 = createMatrix(4, 5, \\", "return suite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite())", "-2, 10]) self.assertEqual(trans, a3.transpose()) def testGetBlock(self): block = Matrix(2, 3,", "self.assertEqual(-6, b5.cofactor(1, 2)) def testCommutator(self): commutator = createMatrix(2, 2, [5,", "def testCofactor(self): self.assertEqual(-6, b5.cofactor(1, 2)) def testCommutator(self): commutator = createMatrix(2,", "def testMap(self): pow_two = createMatrix(1, 2, [9, 4]) self.assertEqual(pow_two, a1.map(lambda", "testMul(self): mul1 = createMatrix(1, 2, [2, -7]) self.assertEqual(mul1, a1 *", "b5.getBlock(2, 1, 2, 3)) def testSubMatrix(self): sub1 = createMatrix(2, 1,", "-1]+[0, 0, 5, 12, -2]+[0, 0, 1, 3, -1]+[0, 0,", "import nzmath.rational as rational import nzmath.poly.uniutil as uniutil Ra =", "vector.Vector([0, 0]) self.assertEqual(zerovec, h.getColumn(1)) square = createMatrix(3, 3, [1, 0,", "self.assertEqual(hermite, h) def testExtHermiteNormalForm(self): already = createMatrix(4, 3, [1, 0,", "1], b7.smithNormalForm()) self.assertEqual([9, 3, 1], b8.smithNormalForm()) def testExtSmithNormalForm(self): smith1 =", "self.assertEqual(sub2, b5.subMatrix([2, 3], [1, 3])) class SquareMatrixTest(unittest.TestCase): def testIsUpperTriangularMatrix(self): UT", "= createMatrix(2, 2, [1, 2]+[3, 4]) b2 = Matrix(2, 2,", "= createMatrix(6, 6, \\ [Ra(4), 2, 5, 0, 2, 1]+[5,", "testTrace(self): self.assertEqual(15, b4.trace()) def testDeterminant(self): self.assertEqual(-2, b1.determinant()) #sf.bug #1914349 self.assertTrue(isinstance(b3.determinant(),", "2, [7, 8]+[3, -2]+[0, 10]) a4 = Matrix(3, 2, [21,", "def testSub(self): sub1 = createMatrix(1, 2, [-2, 8]) self.assertEqual(sub1, a1", "createMatrix(2, 2, [7, 10, 15, 22]) self.assertEqual(pow1, b1 ** 2)", "86)]) self.assertEqual(cinverse, d3.inverse()) self.assertRaises(NoInverse, d2.inverse) self.assertEqual(d3.inverse() * c3, d3.inverse(c3)) def", "testIsSingular(self): assert b6.isSingular() def testTrace(self): self.assertEqual(15, b4.trace()) def testDeterminant(self): self.assertEqual(-2,", "testSub(self): sub1 = createMatrix(1, 2, [-2, 8]) self.assertEqual(sub1, a1 -", "Ra(2), Ra(3)]+[Ra(0), Ra(5), Ra(-2)]+[7, 1, 9]) d4 = createMatrix(6, 6,", "= createMatrix(2, 2, [0, 1, -1, 2]) self.assertEqual(neg, -b2) def", "a1 * b2) mul2 = createMatrix(3, 2, [-15, -6]+[-2, -2]+[0,", "createMatrix(2, 3, [1, 0, 0, 0, 1, 0]) U_2, h_2", "ker_2 = b1.kernelAsModule() self.assertEqual(ker_2, None) class RingSquareMatrixTest(unittest.TestCase): def testPow(self): pow1", "2]) self.assertEqual(0, z[2, 1]) self.assertEqual(0, z[2, 2]) def testOne(self): o", "1]) assert UT.isUpperTriangularMatrix() assert not notUT.isUpperTriangularMatrix() def testIsLowerTriangularMatrix(self): LT =", "= createMatrix(2, 2, [1, 0, 0, 1]) self.assertEqual(pow2, b2 **", "c3.solve, v3) def testColumnEchelonForm(self): echelon = createMatrix(4, 5,\\ [Ra(0), 0,", "= lessrank.hermiteNormalForm() self.assertEqual(h.row, lessrank.row) self.assertEqual(h.column, lessrank.column) zerovec = vector.Vector([0, 0])", "1, 0]), vector.Vector([-12, -1, 0])]) self.assertEqual(a4, lst_vect) def testGetitem(self): self.assertEqual(2,", "createMatrix(1, 2, [3, 2]) a2 = Matrix(1, 2, [5, -6])", "2)]) self.assertEqual(M2, M1) def 
testHessenbergForm(self): pass def testLUDecomposition(self): L, U", "lessrank.row) self.assertEqual(h.column, lessrank.column) zerovec = vector.Vector([0, 0]) self.assertEqual(zerovec, h.getColumn(1)) square", "self.assertTrue(mat6 == 0) mat7 = createMatrix(3, Q) self.assertTrue(mat7.row == mat7.column)", "def testIsSymmetricMatrix(self): symmetric = createMatrix(2, 2, [2, 3, 3, 5])", "testRingAPI(self): m3z = MatrixRing.getInstance(3, Int) m2q = MatrixRing.getInstance(2, rational.theRationalField) #", "self.assertTrue(5 in a2) def testCall(self): call = createMatrix(1, 2, [13,", "RingSquareMatrix b1 = createMatrix(2, 2, [1, 2]+[3, 4]) b2 =", "b1 ** 2) pow2 = createMatrix(2, 2, [1, 0, 0,", "self.assertTrue(not c2 * ker) def testImage(self): img = createMatrix(4,3,[1,2,-1]+[5,12,-2]+[1,3,-1]+[1,2,0]) self.assertEqual(img,", "alternate1.isAlternatingMatrix() alternate2 = createMatrix(2, [1, 2, -2, 0]) assert not", "0 ,0, 1, 0, 0, 1]) self.assertEqual(hermite, h) def testExtHermiteNormalForm(self):", "1, 0]) U_2, h_2 = lessrank.exthermiteNormalForm() self.assertEqual(h_2.row, lessrank.row) self.assertEqual(h_2.column, lessrank.column)", "3, -1]+[0, 0, 1, 2, 0]) c3 = createMatrix(3, 2,", "2, [-1, 0]+[0, -1]+[0, 0]) self.assertEqual(intersect, unit1.intersectionOfSubspaces(unit2)) class FunctionTest(unittest.TestCase): def", "self.assertTrue(mat8 == 0) def suite(suffix=\"Test\"): suite = unittest.TestSuite() all_names =", "vector import nzmath.rational as rational import nzmath.poly.uniutil as uniutil Ra", "c2.image()) def testRank(self): self.assertEqual(3, c2.rank()) self.assertEqual(3, d3.rank()) def testInverseImage(self): self.assertEqual(d6,", "Int)]) self.assertEqual(charMat, b1.characteristicMatrix()) def testCharacteristicPolynomial(self): assert d1.characteristicPolynomial() == d1.characteristicMatrix().determinant() def", "0, 0, 0, 1, 0, 0, 0, 1]) U_1, V_1,", "[Ra(1, 2), Ra(1, 2), Ra(1, 1), Ra(-3, 2)]) self.assertEqual(M2, M1)", "9)], Q) self.assertTrue(mat3.row == mat3.column) self.assertTrue(mat3.__class__, FieldSquareMatrix) mat4 = createMatrix(2,", "2, 0]) c3 = createMatrix(3, 2, [Ra(1), 2]+[2, 5]+[6, 7])", "-2]+[0, 0, Ra(-86, 5)]) self.assertEqual(triangle, d3.triangulate()) def testDeterminant(self): self.assertEqual(Ra(-7, 15),", "None) class RingSquareMatrixTest(unittest.TestCase): def testPow(self): pow1 = createMatrix(2, 2, [7,", "createMatrix(4, 3, [1, 0, 0, 0, 1, 0, 0, 0,", "self.assertEqual(d5.cofactor(2, 3), cofact[2, 3]) def testSmithNormalForm(self): self.assertEqual([12, 1, 1], b5.smithNormalForm())", "self.assertEqual(Ra(-7, 15), d7.determinant()) def testInverse(self): cinverse = createMatrix(3, 3) cinverse.set([Ra(-47,", "1]) self.assertEqual(1, o[2, 2]) def testUnitMatrix(self): \"\"\" unitMatrix() is an", "= createMatrix(7) self.assertTrue(mat8 == 0) def suite(suffix=\"Test\"): suite = unittest.TestSuite()", "class FieldSquareMatrixTest(unittest.TestCase): def testPow(self): pow3 = createMatrix(2, 2, [Ra(11, 2),", "d2.inverseImage, unitMatrix(3)) def testSolve(self): for i in range(1, d6.column+1): self.assertEqual(d6[i],", "1]) U_1, h_1 = already.exthermiteNormalForm() self.assertEqual(h_1, already) self.assertEqual(already * U_1,", "def testSolve(self): for i in range(1, d6.column+1): self.assertEqual(d6[i], d5 *", "mat5 = createMatrix(5, 6, Int) self.assertTrue(mat5 == 0) mat6 =", "createMatrix(4, 5, \\ [Ra(0), 0, 1, 2, -1]+[0, 0, 5,", "[3, 2])) self.assertTrue(isinstance(a1 == a1, bool)) def testNonZero(self): self.assertTrue(not zeroMatrix(2,", "2, 6]) 
self.assertEqual(sub2, b1 - b2) def testMul(self): mul1 =", "testGetRow(self): row1 = vector.Vector([3, -2]) self.assertEqual(row1, a3.getRow(2)) row2 = vector.Vector([1,", "2), Ra(-15, 4), Ra(7, 4)]) self.assertEqual(pow3, d1 ** (-2)) def", "self.assertEqual(M_1, U_1 * b5 * V_1) smith2 = Matrix(3, 3,", "2, 3)) def testSubMatrix(self): sub1 = createMatrix(2, 1, [-12, 0])", "self.assertTrue(mat7 == 0) self.assertEqual(mat7.coeff_ring, Q) mat8 = createMatrix(7) self.assertTrue(mat8 ==", "testOne(self): o = self.m2z.one self.assertEqual(1, o[1, 1]) self.assertEqual(0, o[1, 2])", "self.assertEqual(div, c1 / 3) def testKernel(self): ker = c2.kernel() self.assertTrue(not", "all_names: if name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name], \"test\")) return suite if __name__ ==", "testCreateMatrix(self): Q = rational.theRationalField mat1 = createMatrix(2, 3, [[2,3,4], [5,6,7]])", "5, 6, 1]) b8 = Matrix(3, 3, [3, 15, 12]+[2,7,5]+[1,-4,-2])", "#zero test ker_2 = b1.kernelAsModule() self.assertEqual(ker_2, None) class RingSquareMatrixTest(unittest.TestCase): def", "b2) def testSub(self): sub1 = createMatrix(1, 2, [-2, 8]) self.assertEqual(sub1,", "unitMatrix(3)) def testSolve(self): for i in range(1, d6.column+1): self.assertEqual(d6[i], d5", "= createMatrix(2, 1, [-12, 0]) self.assertEqual(sub1, a4.subMatrix(2, 1)) sub2 =", "h.getColumn(1)) square = createMatrix(3, 3, [1, 0, 0, 0, 1,", "1), 2, 3]+[0, 5, -2]+[0, 0, Ra(-86, 5)]) self.assertEqual(triangle, d3.triangulate())", "-1, 2]) d6 = createMatrix(4, 4, \\ [Ra(1), 2, 3," ]
[ "teardown' # Function Level def func_1_setup(): print 'test_func_1 setup' def", "'test_func_1_teardown' # Target Func def test_func_1(): print 'test_func_1 run' assert", "test_func_1(): print 'test_func_1 run' assert True test_func_1.setUp = func_1_setup test_func_1.tearDown", "print 'test setup' def tearDown(): print 'test teardown' # Function", "print 'test_func_1 setup' def func_1_teardown(): print 'test_func_1_teardown' # Target Func", "setup' def tearDown(): print 'test teardown' # Function Level def", "'test_func_1 setup' def func_1_teardown(): print 'test_func_1_teardown' # Target Func def", "setup' def func_1_teardown(): print 'test_func_1_teardown' # Target Func def test_func_1():", "Target Func def test_func_1(): print 'test_func_1 run' assert True test_func_1.setUp", "# Module Level def setUp(): print 'test setup' def tearDown():", "Function Level def func_1_setup(): print 'test_func_1 setup' def func_1_teardown(): print", "setUp(): print 'test setup' def tearDown(): print 'test teardown' #", "def func_1_setup(): print 'test_func_1 setup' def func_1_teardown(): print 'test_func_1_teardown' #", "def test_func_1(): print 'test_func_1 run' assert True test_func_1.setUp = func_1_setup", "# Function Level def func_1_setup(): print 'test_func_1 setup' def func_1_teardown():", "'test teardown' # Function Level def func_1_setup(): print 'test_func_1 setup'", "# Target Func def test_func_1(): print 'test_func_1 run' assert True", "Level def setUp(): print 'test setup' def tearDown(): print 'test", "func_1_teardown(): print 'test_func_1_teardown' # Target Func def test_func_1(): print 'test_func_1", "<gh_stars>0 # Module Level def setUp(): print 'test setup' def", "def tearDown(): print 'test teardown' # Function Level def func_1_setup():", "def setUp(): print 'test setup' def tearDown(): print 'test teardown'", "def func_1_teardown(): print 'test_func_1_teardown' # Target Func def test_func_1(): print", "'test_func_1 run' assert True test_func_1.setUp = func_1_setup test_func_1.tearDown = func_1_teardown", "Func def test_func_1(): print 'test_func_1 run' assert True test_func_1.setUp =", "'test setup' def tearDown(): print 'test teardown' # Function Level", "print 'test_func_1 run' assert True test_func_1.setUp = func_1_setup test_func_1.tearDown =", "Level def func_1_setup(): print 'test_func_1 setup' def func_1_teardown(): print 'test_func_1_teardown'", "func_1_setup(): print 'test_func_1 setup' def func_1_teardown(): print 'test_func_1_teardown' # Target", "print 'test teardown' # Function Level def func_1_setup(): print 'test_func_1", "Module Level def setUp(): print 'test setup' def tearDown(): print", "print 'test_func_1_teardown' # Target Func def test_func_1(): print 'test_func_1 run'", "tearDown(): print 'test teardown' # Function Level def func_1_setup(): print" ]
[ "def run(self): docs = [] url = \"%s/default/bucket/users..bucket\" % self.bucketd_host", "ThreadPoolExecutor import argparse def get_options(): parser = argparse.ArgumentParser() parser.add_argument(\"-i\", \"--sentinel-ip\",", "self._password = password r = redis.Redis(host=ip, port=port, db=0, password=password) self._ip,", "default=\"16379\", help=\"Sentinel Port\") parser.add_argument(\"-v\", \"--redis-password\", default=None, help=\"Redis AUTH Password\") parser.add_argument(\"-n\",", "userid, bucket in listbuckets: U = askRedis(**redis_conf) data = U.read('buckets',", "host def run(self): docs = [] url = \"%s/default/bucket/users..bucket\" %", "askRedis(**redis_conf) data = U.read('buckets', bucket) content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" %", "'mpuShadowBucket'+bucket, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content) executor.submit(safe_print, \"\") for userid in", "server\") return parser.parse_args() def safe_print(content): print(\"{0}\".format(content)) class askRedis(): def __init__(self,", "= dict( ip=options.sentinel_ip, port=options.sentinel_port, sentinel_cluster_name=options.sentinel_cluster_name, password=options.redis_password ) P = S3ListBuckets(options.bucketd_addr)", "P = S3ListBuckets(options.bucketd_addr) listbuckets = P.run() userids = set([x for", "threading import Thread from concurrent.futures import ThreadPoolExecutor import argparse def", "import requests import redis import json import ast import sys", "name) files = r.get(res) try: return {'files': int(files), \"total_size\": int(total_size)}", "= keys[\"key\"] r1 = re.match(\"(\\w+)..\\|..(\\w+.*)\", key) docs.append(r1.groups()) return docs return(self.userid,", "self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name) def read(self, resource, name): r =", "docs.append(r1.groups()) return docs return(self.userid, self.bucket, user, files, total_size) if __name__", "in listbuckets]) executor = ThreadPoolExecutor(max_workers=1) for userid, bucket in listbuckets:", "data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content) executor.submit(safe_print, \"\") for userid in sorted(userids):", "U.read('buckets', 'mpuShadowBucket'+bucket) content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid, 'mpuShadowBucket'+bucket,", "content = \"Account:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print,", "time import urllib import re import sys from threading import", "sys from threading import Thread from concurrent.futures import ThreadPoolExecutor import", "set([x for x, y in listbuckets]) executor = ThreadPoolExecutor(max_workers=1) for", "executor.submit(safe_print, content) executor.submit(safe_print, \"\") for userid in sorted(userids): U =", "listbuckets = P.run() userids = set([x for x, y in", "\"--sentinel-ip\", default='127.0.0.1', help=\"Sentinel IP\") parser.add_argument(\"-p\", \"--sentinel-port\", default=\"16379\", help=\"Sentinel Port\") parser.add_argument(\"-v\",", "default='http://127.0.0.1:9000', help=\"URL of the bucketd server\") return parser.parse_args() def safe_print(content):", "return {'files': int(files), \"total_size\": int(total_size)} except Exception as e: return", "content) executor.submit(safe_print, \"\") for userid in sorted(userids): U = askRedis(**redis_conf)", "= r.sentinel_get_master_addr_by_name(sentinel_cluster_name) def read(self, resource, name): r = redis.Redis(host=self._ip, 
port=self._port,", "== 200: payload = json.loads(r.text) for keys in payload['Contents']: key", "parser = argparse.ArgumentParser() parser.add_argument(\"-i\", \"--sentinel-ip\", default='127.0.0.1', help=\"Sentinel IP\") parser.add_argument(\"-p\", \"--sentinel-port\",", "payload = json.loads(r.text) for keys in payload['Contents']: key = keys[\"key\"]", "resource, name): r = redis.Redis(host=self._ip, port=self._port, db=0, password=self._password) res =", "files = r.get(res) try: return {'files': int(files), \"total_size\": int(total_size)} except", "= argparse.ArgumentParser() parser.add_argument(\"-i\", \"--sentinel-ip\", default='127.0.0.1', help=\"Sentinel IP\") parser.add_argument(\"-p\", \"--sentinel-port\", default=\"16379\",", "r = redis.Redis(host=self._ip, port=self._port, db=0, password=self._password) res = 's3:%s:%s:storageUtilized:counter' %", "import json import ast import sys import time import urllib", "\"--redis-password\", default=None, help=\"Redis AUTH Password\") parser.add_argument(\"-n\", \"--sentinel-cluster-name\", default='scality-s3', help=\"Redis cluster", "data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content) data = U.read('buckets', 'mpuShadowBucket'+bucket) content =", "content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid, 'mpuShadowBucket'+bucket, data[\"files\"], data[\"total_size\"])", "{'files': 0, \"total_size\": 0} class S3ListBuckets(): def __init__(self, host='127.0.0.1:9000'): self.bucketd_host", "db=0, password=self._password) res = 's3:%s:%s:storageUtilized:counter' % (resource, name) total_size =", "redis.Redis(host=ip, port=port, db=0, password=password) self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name) def read(self,", "import urllib import re import sys from threading import Thread", "r.get(res) try: return {'files': int(files), \"total_size\": int(total_size)} except Exception as", "port=options.sentinel_port, sentinel_cluster_name=options.sentinel_cluster_name, password=options.redis_password ) P = S3ListBuckets(options.bucketd_addr) listbuckets = P.run()", "bucket) content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid, bucket, data[\"files\"],", "{'files': int(files), \"total_size\": int(total_size)} except Exception as e: return {'files':", "password=self._password) res = 's3:%s:%s:storageUtilized:counter' % (resource, name) total_size = r.get(res)", "Exception as e: return {'files': 0, \"total_size\": 0} class S3ListBuckets():", "\"\") for userid in sorted(userids): U = askRedis(**redis_conf) data =", "= session.get(url, timeout=30) if r.status_code == 200: payload = json.loads(r.text)", "r1 = re.match(\"(\\w+)..\\|..(\\w+.*)\", key) docs.append(r1.groups()) return docs return(self.userid, self.bucket, user,", "self.bucket, user, files, total_size) if __name__ == '__main__': options =", "for x, y in listbuckets]) executor = ThreadPoolExecutor(max_workers=1) for userid,", "'s3:%s:%s:storageUtilized:counter' % (resource, name) total_size = r.get(res) res = 's3:%s:%s:numberOfObjects:counter'", "except Exception as e: return {'files': 0, \"total_size\": 0} class", "0} class S3ListBuckets(): def __init__(self, host='127.0.0.1:9000'): self.bucketd_host = host def", "IP\") parser.add_argument(\"-p\", \"--sentinel-port\", default=\"16379\", help=\"Sentinel Port\") parser.add_argument(\"-v\", \"--redis-password\", default=None, help=\"Redis", "help=\"URL of the bucketd server\") return parser.parse_args() def safe_print(content): 
print(\"{0}\".format(content))", "askRedis(): def __init__(self, ip=\"127.0.0.1\", port=\"16379\", sentinel_cluster_name=\"scality-s3\", password=<PASSWORD>): self._password = password", "def get_options(): parser = argparse.ArgumentParser() parser.add_argument(\"-i\", \"--sentinel-ip\", default='127.0.0.1', help=\"Sentinel IP\")", "dict( ip=options.sentinel_ip, port=options.sentinel_port, sentinel_cluster_name=options.sentinel_cluster_name, password=options.redis_password ) P = S3ListBuckets(options.bucketd_addr) listbuckets", "urllib import re import sys from threading import Thread from", "int(files), \"total_size\": int(total_size)} except Exception as e: return {'files': 0,", "name): r = redis.Redis(host=self._ip, port=self._port, db=0, password=self._password) res = 's3:%s:%s:storageUtilized:counter'", "sentinel_cluster_name=\"scality-s3\", password=<PASSWORD>): self._password = password r = redis.Redis(host=ip, port=port, db=0,", "= requests.Session() r = session.get(url, timeout=30) if r.status_code == 200:", "return parser.parse_args() def safe_print(content): print(\"{0}\".format(content)) class askRedis(): def __init__(self, ip=\"127.0.0.1\",", "session.get(url, timeout=30) if r.status_code == 200: payload = json.loads(r.text) for", "parser.add_argument(\"-i\", \"--sentinel-ip\", default='127.0.0.1', help=\"Sentinel IP\") parser.add_argument(\"-p\", \"--sentinel-port\", default=\"16379\", help=\"Sentinel Port\")", "= ThreadPoolExecutor(max_workers=1) for userid, bucket in listbuckets: U = askRedis(**redis_conf)", "argparse.ArgumentParser() parser.add_argument(\"-i\", \"--sentinel-ip\", default='127.0.0.1', help=\"Sentinel IP\") parser.add_argument(\"-p\", \"--sentinel-port\", default=\"16379\", help=\"Sentinel", "self.bucketd_host session = requests.Session() r = session.get(url, timeout=30) if r.status_code", "files, total_size) if __name__ == '__main__': options = get_options() redis_conf", "U = askRedis(**redis_conf) data = U.read('accounts', userid) content = \"Account:%s|NumberOFfiles:%s|StorageCapacity:%s", "session = requests.Session() r = session.get(url, timeout=30) if r.status_code ==", "= \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid, bucket, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print,", "in payload['Contents']: key = keys[\"key\"] r1 = re.match(\"(\\w+)..\\|..(\\w+.*)\", key) docs.append(r1.groups())", "= get_options() redis_conf = dict( ip=options.sentinel_ip, port=options.sentinel_port, sentinel_cluster_name=options.sentinel_cluster_name, password=options.redis_password )", "bucket, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content) data = U.read('buckets', 'mpuShadowBucket'+bucket) content", "Port\") parser.add_argument(\"-v\", \"--redis-password\", default=None, help=\"Redis AUTH Password\") parser.add_argument(\"-n\", \"--sentinel-cluster-name\", default='scality-s3',", "default='scality-s3', help=\"Redis cluster name\") parser.add_argument(\"-b\", \"--bucketd-addr\", default='http://127.0.0.1:9000', help=\"URL of the", "cluster name\") parser.add_argument(\"-b\", \"--bucketd-addr\", default='http://127.0.0.1:9000', help=\"URL of the bucketd server\")", "get_options() redis_conf = dict( ip=options.sentinel_ip, port=options.sentinel_port, sentinel_cluster_name=options.sentinel_cluster_name, password=options.redis_password ) P", "concurrent.futures import ThreadPoolExecutor import argparse def get_options(): parser = argparse.ArgumentParser()", "parser.add_argument(\"-n\", 
\"--sentinel-cluster-name\", default='scality-s3', help=\"Redis cluster name\") parser.add_argument(\"-b\", \"--bucketd-addr\", default='http://127.0.0.1:9000', help=\"URL", "AUTH Password\") parser.add_argument(\"-n\", \"--sentinel-cluster-name\", default='scality-s3', help=\"Redis cluster name\") parser.add_argument(\"-b\", \"--bucketd-addr\",", "redis_conf = dict( ip=options.sentinel_ip, port=options.sentinel_port, sentinel_cluster_name=options.sentinel_cluster_name, password=options.redis_password ) P =", "\"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid, bucket, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content)", "res = 's3:%s:%s:storageUtilized:counter' % (resource, name) total_size = r.get(res) res", "r = session.get(url, timeout=30) if r.status_code == 200: payload =", "import ThreadPoolExecutor import argparse def get_options(): parser = argparse.ArgumentParser() parser.add_argument(\"-i\",", "= json.loads(r.text) for keys in payload['Contents']: key = keys[\"key\"] r1", "password=<PASSWORD>): self._password = password r = redis.Redis(host=ip, port=port, db=0, password=password)", "import time import urllib import re import sys from threading", "def safe_print(content): print(\"{0}\".format(content)) class askRedis(): def __init__(self, ip=\"127.0.0.1\", port=\"16379\", sentinel_cluster_name=\"scality-s3\",", "= r.get(res) try: return {'files': int(files), \"total_size\": int(total_size)} except Exception", "payload['Contents']: key = keys[\"key\"] r1 = re.match(\"(\\w+)..\\|..(\\w+.*)\", key) docs.append(r1.groups()) return", "(resource, name) files = r.get(res) try: return {'files': int(files), \"total_size\":", "bucket in listbuckets: U = askRedis(**redis_conf) data = U.read('buckets', bucket)", "content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid, bucket, data[\"files\"], data[\"total_size\"])", "( userid, bucket, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content) data = U.read('buckets',", "self.bucketd_host = host def run(self): docs = [] url =", "= [] url = \"%s/default/bucket/users..bucket\" % self.bucketd_host session = requests.Session()", "data[\"total_size\"]) executor.submit(safe_print, content) data = U.read('buckets', 'mpuShadowBucket'+bucket) content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s", "try: return {'files': int(files), \"total_size\": int(total_size)} except Exception as e:", "ip=options.sentinel_ip, port=options.sentinel_port, sentinel_cluster_name=options.sentinel_cluster_name, password=options.redis_password ) P = S3ListBuckets(options.bucketd_addr) listbuckets =", "P.run() userids = set([x for x, y in listbuckets]) executor", "parser.add_argument(\"-b\", \"--bucketd-addr\", default='http://127.0.0.1:9000', help=\"URL of the bucketd server\") return parser.parse_args()", "password r = redis.Redis(host=ip, port=port, db=0, password=password) self._ip, self._port =", "= re.match(\"(\\w+)..\\|..(\\w+.*)\", key) docs.append(r1.groups()) return docs return(self.userid, self.bucket, user, files,", "as e: return {'files': 0, \"total_size\": 0} class S3ListBuckets(): def", "= host def run(self): docs = [] url = \"%s/default/bucket/users..bucket\"", "\"--bucketd-addr\", default='http://127.0.0.1:9000', help=\"URL of the bucketd server\") return parser.parse_args() def", "import sys from threading import Thread from concurrent.futures import ThreadPoolExecutor", "argparse def get_options(): parser = argparse.ArgumentParser() 
parser.add_argument(\"-i\", \"--sentinel-ip\", default='127.0.0.1', help=\"Sentinel", "re import sys from threading import Thread from concurrent.futures import", "return {'files': 0, \"total_size\": 0} class S3ListBuckets(): def __init__(self, host='127.0.0.1:9000'):", "= S3ListBuckets(options.bucketd_addr) listbuckets = P.run() userids = set([x for x,", "userids = set([x for x, y in listbuckets]) executor =", "\" % ( userid, 'mpuShadowBucket'+bucket, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content) executor.submit(safe_print,", "= 's3:%s:%s:storageUtilized:counter' % (resource, name) total_size = r.get(res) res =", "class S3ListBuckets(): def __init__(self, host='127.0.0.1:9000'): self.bucketd_host = host def run(self):", "userid in sorted(userids): U = askRedis(**redis_conf) data = U.read('accounts', userid)", "key) docs.append(r1.groups()) return docs return(self.userid, self.bucket, user, files, total_size) if", "parser.add_argument(\"-v\", \"--redis-password\", default=None, help=\"Redis AUTH Password\") parser.add_argument(\"-n\", \"--sentinel-cluster-name\", default='scality-s3', help=\"Redis", "\"total_size\": 0} class S3ListBuckets(): def __init__(self, host='127.0.0.1:9000'): self.bucketd_host = host", "in sorted(userids): U = askRedis(**redis_conf) data = U.read('accounts', userid) content", "import re import sys from threading import Thread from concurrent.futures", "from threading import Thread from concurrent.futures import ThreadPoolExecutor import argparse", "S3ListBuckets(): def __init__(self, host='127.0.0.1:9000'): self.bucketd_host = host def run(self): docs", "password=options.redis_password ) P = S3ListBuckets(options.bucketd_addr) listbuckets = P.run() userids =", "'mpuShadowBucket'+bucket) content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid, 'mpuShadowBucket'+bucket, data[\"files\"],", "import argparse def get_options(): parser = argparse.ArgumentParser() parser.add_argument(\"-i\", \"--sentinel-ip\", default='127.0.0.1',", "def read(self, resource, name): r = redis.Redis(host=self._ip, port=self._port, db=0, password=self._password)", "total_size) if __name__ == '__main__': options = get_options() redis_conf =", "int(total_size)} except Exception as e: return {'files': 0, \"total_size\": 0}", "res = 's3:%s:%s:numberOfObjects:counter' % (resource, name) files = r.get(res) try:", "% self.bucketd_host session = requests.Session() r = session.get(url, timeout=30) if", "= askRedis(**redis_conf) data = U.read('accounts', userid) content = \"Account:%s|NumberOFfiles:%s|StorageCapacity:%s \"", "requests import redis import json import ast import sys import", "help=\"Sentinel IP\") parser.add_argument(\"-p\", \"--sentinel-port\", default=\"16379\", help=\"Sentinel Port\") parser.add_argument(\"-v\", \"--redis-password\", default=None,", "in listbuckets: U = askRedis(**redis_conf) data = U.read('buckets', bucket) content", "= redis.Redis(host=self._ip, port=self._port, db=0, password=self._password) res = 's3:%s:%s:storageUtilized:counter' % (resource,", "get_options(): parser = argparse.ArgumentParser() parser.add_argument(\"-i\", \"--sentinel-ip\", default='127.0.0.1', help=\"Sentinel IP\") parser.add_argument(\"-p\",", "__init__(self, host='127.0.0.1:9000'): self.bucketd_host = host def run(self): docs = []", "ThreadPoolExecutor(max_workers=1) for userid, bucket in listbuckets: U = askRedis(**redis_conf) data", "port=\"16379\", sentinel_cluster_name=\"scality-s3\", password=<PASSWORD>): self._password = password r 
= redis.Redis(host=ip, port=port,", "\"--sentinel-port\", default=\"16379\", help=\"Sentinel Port\") parser.add_argument(\"-v\", \"--redis-password\", default=None, help=\"Redis AUTH Password\")", "json.loads(r.text) for keys in payload['Contents']: key = keys[\"key\"] r1 =", "ip=\"127.0.0.1\", port=\"16379\", sentinel_cluster_name=\"scality-s3\", password=<PASSWORD>): self._password = password r = redis.Redis(host=ip,", "== '__main__': options = get_options() redis_conf = dict( ip=options.sentinel_ip, port=options.sentinel_port,", "redis.Redis(host=self._ip, port=self._port, db=0, password=self._password) res = 's3:%s:%s:storageUtilized:counter' % (resource, name)", "r.sentinel_get_master_addr_by_name(sentinel_cluster_name) def read(self, resource, name): r = redis.Redis(host=self._ip, port=self._port, db=0,", "run(self): docs = [] url = \"%s/default/bucket/users..bucket\" % self.bucketd_host session", "Password\") parser.add_argument(\"-n\", \"--sentinel-cluster-name\", default='scality-s3', help=\"Redis cluster name\") parser.add_argument(\"-b\", \"--bucketd-addr\", default='http://127.0.0.1:9000',", "return(self.userid, self.bucket, user, files, total_size) if __name__ == '__main__': options", "timeout=30) if r.status_code == 200: payload = json.loads(r.text) for keys", "import ast import sys import time import urllib import re", "= password r = redis.Redis(host=ip, port=port, db=0, password=password) self._ip, self._port", "keys in payload['Contents']: key = keys[\"key\"] r1 = re.match(\"(\\w+)..\\|..(\\w+.*)\", key)", "safe_print(content): print(\"{0}\".format(content)) class askRedis(): def __init__(self, ip=\"127.0.0.1\", port=\"16379\", sentinel_cluster_name=\"scality-s3\", password=<PASSWORD>):", "0, \"total_size\": 0} class S3ListBuckets(): def __init__(self, host='127.0.0.1:9000'): self.bucketd_host =", "executor.submit(safe_print, \"\") for userid in sorted(userids): U = askRedis(**redis_conf) data", "json import ast import sys import time import urllib import", "total_size = r.get(res) res = 's3:%s:%s:numberOfObjects:counter' % (resource, name) files", "options = get_options() redis_conf = dict( ip=options.sentinel_ip, port=options.sentinel_port, sentinel_cluster_name=options.sentinel_cluster_name, password=options.redis_password", "U.read('buckets', bucket) content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid, bucket,", "\" % ( userid, bucket, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content) data", "( userid, 'mpuShadowBucket'+bucket, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content) executor.submit(safe_print, \"\") for", "r = redis.Redis(host=ip, port=port, db=0, password=password) self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name)", ") P = S3ListBuckets(options.bucketd_addr) listbuckets = P.run() userids = set([x", "for userid in sorted(userids): U = askRedis(**redis_conf) data = U.read('accounts',", "name\") parser.add_argument(\"-b\", \"--bucketd-addr\", default='http://127.0.0.1:9000', help=\"URL of the bucketd server\") return", "\"total_size\": int(total_size)} except Exception as e: return {'files': 0, \"total_size\":", "S3ListBuckets(options.bucketd_addr) listbuckets = P.run() userids = set([x for x, y", "e: return {'files': 0, \"total_size\": 0} class S3ListBuckets(): def __init__(self,", "help=\"Sentinel Port\") parser.add_argument(\"-v\", \"--redis-password\", default=None, help=\"Redis AUTH Password\") parser.add_argument(\"-n\", 
\"--sentinel-cluster-name\",", "host='127.0.0.1:9000'): self.bucketd_host = host def run(self): docs = [] url", "port=self._port, db=0, password=self._password) res = 's3:%s:%s:storageUtilized:counter' % (resource, name) total_size", "content) data = U.read('buckets', 'mpuShadowBucket'+bucket) content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" %", "% ( userid, bucket, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content) data =", "if __name__ == '__main__': options = get_options() redis_conf = dict(", "= askRedis(**redis_conf) data = U.read('buckets', bucket) content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \"", "\"--sentinel-cluster-name\", default='scality-s3', help=\"Redis cluster name\") parser.add_argument(\"-b\", \"--bucketd-addr\", default='http://127.0.0.1:9000', help=\"URL of", "def __init__(self, ip=\"127.0.0.1\", port=\"16379\", sentinel_cluster_name=\"scality-s3\", password=<PASSWORD>): self._password = password r", "% (resource, name) total_size = r.get(res) res = 's3:%s:%s:numberOfObjects:counter' %", "ast import sys import time import urllib import re import", "if r.status_code == 200: payload = json.loads(r.text) for keys in", "x, y in listbuckets]) executor = ThreadPoolExecutor(max_workers=1) for userid, bucket", "= 's3:%s:%s:numberOfObjects:counter' % (resource, name) files = r.get(res) try: return", "user, files, total_size) if __name__ == '__main__': options = get_options()", "default=None, help=\"Redis AUTH Password\") parser.add_argument(\"-n\", \"--sentinel-cluster-name\", default='scality-s3', help=\"Redis cluster name\")", "for keys in payload['Contents']: key = keys[\"key\"] r1 = re.match(\"(\\w+)..\\|..(\\w+.*)\",", "db=0, password=password) self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name) def read(self, resource, name):", "Thread from concurrent.futures import ThreadPoolExecutor import argparse def get_options(): parser", "port=port, db=0, password=password) self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name) def read(self, resource,", "y in listbuckets]) executor = ThreadPoolExecutor(max_workers=1) for userid, bucket in", "url = \"%s/default/bucket/users..bucket\" % self.bucketd_host session = requests.Session() r =", "data = U.read('buckets', 'mpuShadowBucket'+bucket) content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" % (", "executor = ThreadPoolExecutor(max_workers=1) for userid, bucket in listbuckets: U =", "sys import time import urllib import re import sys from", "__name__ == '__main__': options = get_options() redis_conf = dict( ip=options.sentinel_ip,", "= P.run() userids = set([x for x, y in listbuckets])", "for userid, bucket in listbuckets: U = askRedis(**redis_conf) data =", "= redis.Redis(host=ip, port=port, db=0, password=password) self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name) def", "default='127.0.0.1', help=\"Sentinel IP\") parser.add_argument(\"-p\", \"--sentinel-port\", default=\"16379\", help=\"Sentinel Port\") parser.add_argument(\"-v\", \"--redis-password\",", "userid, bucket, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content) data = U.read('buckets', 'mpuShadowBucket'+bucket)", "re.match(\"(\\w+)..\\|..(\\w+.*)\", key) docs.append(r1.groups()) return docs return(self.userid, self.bucket, user, files, total_size)", "= \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid, 'mpuShadowBucket'+bucket, data[\"files\"], 
data[\"total_size\"]) executor.submit(safe_print,", "password=password) self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name) def read(self, resource, name): r", "= r.get(res) res = 's3:%s:%s:numberOfObjects:counter' % (resource, name) files =", "r.get(res) res = 's3:%s:%s:numberOfObjects:counter' % (resource, name) files = r.get(res)", "= \"%s/default/bucket/users..bucket\" % self.bucketd_host session = requests.Session() r = session.get(url,", "requests.Session() r = session.get(url, timeout=30) if r.status_code == 200: payload", "import Thread from concurrent.futures import ThreadPoolExecutor import argparse def get_options():", "listbuckets: U = askRedis(**redis_conf) data = U.read('buckets', bucket) content =", "askRedis(**redis_conf) data = U.read('accounts', userid) content = \"Account:%s|NumberOFfiles:%s|StorageCapacity:%s \" %", "executor.submit(safe_print, content) data = U.read('buckets', 'mpuShadowBucket'+bucket) content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \"", "class askRedis(): def __init__(self, ip=\"127.0.0.1\", port=\"16379\", sentinel_cluster_name=\"scality-s3\", password=<PASSWORD>): self._password =", "(resource, name) total_size = r.get(res) res = 's3:%s:%s:numberOfObjects:counter' % (resource,", "parser.add_argument(\"-p\", \"--sentinel-port\", default=\"16379\", help=\"Sentinel Port\") parser.add_argument(\"-v\", \"--redis-password\", default=None, help=\"Redis AUTH", "__init__(self, ip=\"127.0.0.1\", port=\"16379\", sentinel_cluster_name=\"scality-s3\", password=<PASSWORD>): self._password = password r =", "'__main__': options = get_options() redis_conf = dict( ip=options.sentinel_ip, port=options.sentinel_port, sentinel_cluster_name=options.sentinel_cluster_name,", "docs = [] url = \"%s/default/bucket/users..bucket\" % self.bucketd_host session =", "\"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid, 'mpuShadowBucket'+bucket, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content)", "200: payload = json.loads(r.text) for keys in payload['Contents']: key =", "return docs return(self.userid, self.bucket, user, files, total_size) if __name__ ==", "% ( userid, 'mpuShadowBucket'+bucket, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content) executor.submit(safe_print, \"\")", "import redis import json import ast import sys import time", "data = U.read('accounts', userid) content = \"Account:%s|NumberOFfiles:%s|StorageCapacity:%s \" % (", "key = keys[\"key\"] r1 = re.match(\"(\\w+)..\\|..(\\w+.*)\", key) docs.append(r1.groups()) return docs", "docs return(self.userid, self.bucket, user, files, total_size) if __name__ == '__main__':", "listbuckets]) executor = ThreadPoolExecutor(max_workers=1) for userid, bucket in listbuckets: U", "userid, 'mpuShadowBucket'+bucket, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content) executor.submit(safe_print, \"\") for userid", "'s3:%s:%s:numberOfObjects:counter' % (resource, name) files = r.get(res) try: return {'files':", "data[\"total_size\"]) executor.submit(safe_print, content) executor.submit(safe_print, \"\") for userid in sorted(userids): U", "def __init__(self, host='127.0.0.1:9000'): self.bucketd_host = host def run(self): docs =", "sorted(userids): U = askRedis(**redis_conf) data = U.read('accounts', userid) content =", "the bucketd server\") return parser.parse_args() def safe_print(content): print(\"{0}\".format(content)) class askRedis():", "name) total_size = r.get(res) res = 
's3:%s:%s:numberOfObjects:counter' % (resource, name)", "self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name) def read(self, resource, name): r = redis.Redis(host=self._ip,", "keys[\"key\"] r1 = re.match(\"(\\w+)..\\|..(\\w+.*)\", key) docs.append(r1.groups()) return docs return(self.userid, self.bucket,", "= set([x for x, y in listbuckets]) executor = ThreadPoolExecutor(max_workers=1)", "userid) content = \"Account:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid, data[\"files\"], data[\"total_size\"])", "[] url = \"%s/default/bucket/users..bucket\" % self.bucketd_host session = requests.Session() r", "print(\"{0}\".format(content)) class askRedis(): def __init__(self, ip=\"127.0.0.1\", port=\"16379\", sentinel_cluster_name=\"scality-s3\", password=<PASSWORD>): self._password", "import sys import time import urllib import re import sys", "r.status_code == 200: payload = json.loads(r.text) for keys in payload['Contents']:", "bucketd server\") return parser.parse_args() def safe_print(content): print(\"{0}\".format(content)) class askRedis(): def", "from concurrent.futures import ThreadPoolExecutor import argparse def get_options(): parser =", "= \"Account:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid, data[\"files\"], data[\"total_size\"]) executor.submit(safe_print, content)", "= U.read('buckets', bucket) content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid,", "help=\"Redis AUTH Password\") parser.add_argument(\"-n\", \"--sentinel-cluster-name\", default='scality-s3', help=\"Redis cluster name\") parser.add_argument(\"-b\",", "= U.read('buckets', 'mpuShadowBucket'+bucket) content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid,", "redis import json import ast import sys import time import", "data = U.read('buckets', bucket) content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s \" % (", "\"%s/default/bucket/users..bucket\" % self.bucketd_host session = requests.Session() r = session.get(url, timeout=30)", "% (resource, name) files = r.get(res) try: return {'files': int(files),", "help=\"Redis cluster name\") parser.add_argument(\"-b\", \"--bucketd-addr\", default='http://127.0.0.1:9000', help=\"URL of the bucketd", "sentinel_cluster_name=options.sentinel_cluster_name, password=options.redis_password ) P = S3ListBuckets(options.bucketd_addr) listbuckets = P.run() userids", "parser.parse_args() def safe_print(content): print(\"{0}\".format(content)) class askRedis(): def __init__(self, ip=\"127.0.0.1\", port=\"16379\",", "U.read('accounts', userid) content = \"Account:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid, data[\"files\"],", "read(self, resource, name): r = redis.Redis(host=self._ip, port=self._port, db=0, password=self._password) res", "of the bucketd server\") return parser.parse_args() def safe_print(content): print(\"{0}\".format(content)) class", "= U.read('accounts', userid) content = \"Account:%s|NumberOFfiles:%s|StorageCapacity:%s \" % ( userid,", "U = askRedis(**redis_conf) data = U.read('buckets', bucket) content = \"Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s" ]
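# read() above derives both counters from a single key scheme. Factored out
# as a standalone sketch (utilization_keys is a hypothetical helper, not part
# of the script), so the naming is easy to verify against redis-cli:
def utilization_keys(resource, name):
    return ('s3:%s:%s:storageUtilized:counter' % (resource, name),
            's3:%s:%s:numberOfObjects:counter' % (resource, name))

print(utilization_keys('buckets', 'my-bucket'))
# -> ('s3:buckets:my-bucket:storageUtilized:counter',
#     's3:buckets:my-bucket:numberOfObjects:counter')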
[ "= None attentions: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None", "[4]), dtype=torch.long, device=device) # We can provide a self-attention mask", "logger = logging.getLogger(__name__) SkimformerEncoderOutput = namedtuple( \"SkimformerEncoderOutput\", [\"hidden_states\", \"all_hidden_states\"], )", "0]) embeddings = ( left_position_embeddings + upper_position_embeddings + right_position_embeddings +", "for all layers in BertModel forward() function) attention_scores = attention_scores", "return outputs return SkimformerModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, attentions=skim_attention_output if output_attentions else", "= ( left_position_embeddings + upper_position_embeddings + right_position_embeddings + lower_position_embeddings +", "module.bias.data.zero_() class SkimmingMaskPreTrainedModel(PreTrainedModel): \"\"\" An abstract class to handle weights", "self.use_1d_positions: pos_embedding_output = self.one_dim_pos_embeddings( input_shape=input_shape, device=device, position_ids=position_ids, ) else: pos_embedding_output", "weights after the attention softmax, used to compute the weighted", "else: raise ValueError(\"You have to specify either input_ids or inputs_embeds\")", "is not None else inputs_embeds.device if token_type_ids is None: token_type_ids", "if bbox is None: bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long,", "None else self.config.use_return_dict outputs = self.skimformer( input_ids, bbox, attention_mask=attention_mask, token_type_ids=token_type_ids,", "probabilities only: Softmax(QK^T/sqrt(d)) return attention_probs class SkimformerSelfAttention(nn.Module): def __init__(self, config):", "head_mask=head_mask, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output)", "not in self.pruned_heads] attention_probs = torch.index_select(attention_probs, 1, indices) self_output =", "LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, bbox=None): if self.degrade_2d_positions:", "hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states =", ") from transformers.modeling_outputs import ( BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput, ) from", "input_shape[1] device = input_ids.device if input_ids is not None else", "text embeddings left_position_embeddings = self.x_position_projection(left_position_embeddings) upper_position_embeddings = self.y_position_projection(upper_position_embeddings) right_position_embeddings =", "BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize", "head_mask=None, ): attention_output = self.attention( hidden_states, attention_probs, head_mask, ) layer_output", "right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])", "and module.bias is not None: module.bias.data.zero_() class BertWithSkimEmbedPreTrainedModel(PreTrainedModel): \"\"\" An", "None: module.bias.data.zero_() class BertWithSkimEmbedPreTrainedModel(PreTrainedModel): \"\"\" An abstract class to handle", "input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device =", "elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: 
raise", "( len(input_shape) == 2 ), \"`input_ids` has to be of", "return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings def forward(", "= config self.layer = nn.ModuleList([SkimformerLayer(config) for _ in range(config.num_hidden_layers)]) def", "self.encoder.layer[layer].attention.prune_heads(heads) def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,", "hidden_dropout_prob=config.hidden_dropout_prob, attention_probs_dropout_prob=config.attention_probs_dropout_prob, max_position_embeddings=config.max_2d_position_embeddings, initializer_range=config.initializer_range, layer_norm_eps=config.layer_norm_eps, gradient_checkpointing=config.gradient_checkpointing, ) ) self.skim_attention =", "pooled_output) if output_attentions: outputs = outputs + (skim_attention_output, ) if", "Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`): Sequence of", "specify either input_ids or inputs_embeds\") assert ( len(input_shape) == 2", "self.text_embeddings( input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) if self.use_1d_positions: pos_embedding_output = self.one_dim_pos_embeddings(", "Attentions weights after the attention softmax, used to compute the", "self.use_1d_positions: self.one_dim_pos_embeddings = Skimformer1DPositionEmbeddings(config) else: self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions =", "module.bias is not None: module.bias.data.zero_() class BertWithSkimEmbedPreTrainedModel(PreTrainedModel): \"\"\" An abstract", "output_hidden_states: outputs = outputs + encoder_outputs[1:] return outputs return SkimformerModelOutput(", "return_dict: output = (logits,) + outputs[2:] return ((loss,) + output)", "attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size)", "initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is", "getattr(config, \"position_embedding_type\", \"absolute\") self.LayerNorm = LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob)", "(prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is", "torch.FloatTensor = None pooler_output: torch.FloatTensor = None attentions: Optional[torch.FloatTensor] =", "eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None): if", "labels in ``[0, ..., config.vocab_size]`` \"\"\" return_dict = return_dict if", "layer_output class SkimformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config", "hidden_states=encoder_outputs.all_hidden_states, ) class BertWithSkimEmbedModel(BertWithSkimEmbedPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config", "class PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def", "inputs_embeds.device if position_ids is None: position_ids = self.position_ids[:, :seq_length] if", "token of the sequence (classification token) further processed by a", "return_dict=None, ): return_dict = return_dict if return_dict is not None", "labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing", "assert ( len(input_shape) == 2 ), \"`input_ids` has to be", "self.dropout(attention_probs) # return the attention probabilities only: Softmax(QK^T/sqrt(d)) return attention_probs", "skim_attention_output = self.skim_attention( spatial_pos_embedding_output, attention_mask=extended_attention_mask, ) topk_idx = torch.topk(skim_attention_output, self.top_k,", "by a Linear layer and a Tanh activation function. 
The", "self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None", "import math import torch from torch import nn from torch.nn", "torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1,", "# return the attention probabilities only: Softmax(QK^T/sqrt(d)) return attention_probs class", "// 2 x_center_position_embeddings = self.x_position_embeddings(x_center) y_center_position_embeddings = self.y_position_embeddings(y_center) except IndexError", "config.hidden_layout_size) self.degrade_2d_positions = config.degrade_2d_positions if hasattr(config, \"degrade_2d_positions\") else False self.LayerNorm", "nn.Linear(all_head_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def", "with labels in ``[0, ..., config.vocab_size]`` \"\"\" return_dict = return_dict", "torch.zeros(skim_attention_output.shape, device=device) skim_attention_mask = skim_attention_mask.scatter(-1, topk_idx, 1) skim_attention_mask = skim_attention_mask", "config.hidden_layout_size) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.x_position_projection", "self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward( self, hidden_states,", "= SkimformerModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels)", "2 x_center_position_embeddings = self.x_position_embeddings(x_center) y_center_position_embeddings = self.y_position_embeddings(y_center) except IndexError as", "SkimformerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) ==", "last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`): Sequence of hidden-states", "attention_probs has shape bsz x n_heads x N x N", "nn.Embedding)): # Slightly different from the TF version which uses", "\"\"\" config_class = SkimformerConfig base_model_prefix = \"skimformer\" def _init_weights(self, module):", "= value def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the", "`[batch_size, sequence_length]`, but got shape: {}\".format(input_shape) if bbox is not", "function. 
The Linear layer weights are trained from the next", "self.value = nn.Linear(config.hidden_size, self.all_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1]", "dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads", "embedding_output = self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs", "hidden_layout_states, attention_mask=None, ): key_layer = self.transpose_for_scores(self.key(hidden_layout_states)) query_layer = self.transpose_for_scores(self.query(hidden_layout_states)) #", "labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels),", "return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states,", "+ (hidden_states,) if not return_dict: return tuple( v for v", "getattr(config, \"position_embedding_type\", \"absolute\") def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] +", "attention_probs, head_mask, ) layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output", "has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask", "input_shape = input_ids.size() device = input_ids.device else: input_shape = inputs_embeds.size()[:-1]", "self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) return layer_output def feed_forward_chunk(self, attention_output):", "r\"predictions.decoder.bias\"] def __init__(self, config): super().__init__(config) self.skimformer = SkimformerModel(config, add_pooling_layer=False) self.cls", "output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if", "<reponame>recitalAI/skim-attention<gh_stars>1-10 from collections import namedtuple import logging from dataclasses import", "config.num_labels self.skimformer = SkimformerModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier =", "seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) text_embedding_output = self.text_embeddings( input_ids=input_ids, token_type_ids=token_type_ids,", "= SkimformerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads)", "0] + bbox[:, :, 2]) // 2 y_center = (bbox[:,", "prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned", "last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor = None attentions: Optional[torch.FloatTensor]", "box coordinates.\"\"\" def __init__(self, config): super().__init__() self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)", "the initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True``", "position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, extended_attention_mask, head_mask=head_mask,", "config): super().__init__(config) self.num_labels = config.num_labels self.skimming_mask_model = SkimmingMaskModel(config, add_pooling_layer=False) self.dropout", "text_embedding_output, skim_attention_output, head_mask=head_mask, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output", "function) attention_scores = attention_scores + attention_mask # Normalize the attention", "attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ):", "else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class", "= x_center_position_embeddings + y_center_position_embeddings else: try: left_position_embeddings = self.x_position_embeddings(bbox[:, :,", "nn.Embedding(config.max_position_embeddings, config.hidden_layout_size) self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\") self.LayerNorm = LayerNorm(config.hidden_layout_size,", "[r\"pooler\"] _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"] def __init__(self, config): super().__init__(config) self.skimformer", "= BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings):", "both input_ids and inputs_embeds at the same time\") elif input_ids", "= padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not", "inputs_embeds is not None: raise ValueError(\"You cannot specify both input_ids", "+ right_position_embeddings + lower_position_embeddings + h_position_embeddings + w_position_embeddings ) embeddings", "bbox_shape = bbox.size() assert ( len(bbox_shape) == 3 ), \"`bbox`", "all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for", "= self.x_position_projection(left_position_embeddings) upper_position_embeddings = self.y_position_projection(upper_position_embeddings) right_position_embeddings = self.x_position_projection(right_position_embeddings) lower_position_embeddings =", "https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module,", "Labels for computing the masked language modeling loss. Indices should", "``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then ``num_predict`` corresponds to ``sequence_length``.", "which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)", "models. 
\"\"\" config_class = SkimmingMaskConfig base_model_prefix = \"skimmingmask\" _keys_to_ignore_on_load_missing =", "have to specify either input_ids or inputs_embeds\") device = input_ids.device", "class BertWithSkimEmbedEmbeddings(nn.Module): \"\"\"Construct the embeddings from word, position and token_type", "value): self.text_embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads", "bbox=None): if self.degrade_2d_positions: try: x_center = (bbox[:, :, 0] +", "self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0:", "of shape :obj:`(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the", ") from transformers.modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, )", "inputs_embeds.device if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)", "def get_input_embeddings(self): return self.text_embeddings.word_embeddings def set_input_embeddings(self, value): self.text_embeddings.word_embeddings = value", "isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version", "= input_ids.size() batch_size, seq_length = input_shape elif inputs_embeds is not", ") class SkimmingMaskModel(SkimmingMaskPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config =", "): r\"\"\" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels", "output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions", "embeddings class BertWithSkimEmbedEmbeddings(nn.Module): \"\"\"Construct the embeddings from word, position and", "= self.skimformer( input_ids, bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions,", "4]`, but got shape: {}\".format(bbox_shape) device = input_ids.device if input_ids", "idx not in self.pruned_heads] attention_probs = torch.index_select(attention_probs, 1, indices) self_output", "= nn.Embedding(config.max_position_embeddings, config.hidden_layout_size) self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\") self.LayerNorm =", "( BertConfig, BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead, ) from", "import ( SkimformerConfig, BertWithSkimEmbedConfig, SkimmingMaskConfig, ) logger = logging.getLogger(__name__) SkimformerEncoderOutput", "module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_()", "collections import namedtuple import logging from dataclasses import dataclass from", "return_dict: return tuple( v for v in [ hidden_states, all_hidden_states,", "hidden_size)`. 
Hidden-states of the model at the output of each", "== 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1,", "= config.degrade_2d_positions if hasattr(config, \"degrade_2d_positions\") else False self.LayerNorm = LayerNorm(config.hidden_layout_size,", "last_hidden_state=sequence_output, pooler_output=pooled_output, attentions=skim_attention_output if output_attentions else None, hidden_states=encoder_outputs.all_hidden_states, ) class", "= attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask)", "is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is", "embedding_output, extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0]", "\"position_embedding_type\", \"absolute\") def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads,", "isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @dataclass class", "3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return", "self.transpose_for_scores(self.value(hidden_states)) # Mask heads if we want to if head_mask", "torch.nn import CrossEntropyLoss, LayerNorm from torch.autograd.function import Function from transformers.file_utils", "this layer} See base class PreTrainedModel \"\"\" for layer, heads", "None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss]", "is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError(\"You have", ":class:`~SkimformerModel`. 
Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`): Sequence", "is not None: raise ValueError(\"You cannot specify both input_ids and", "IndexError(\"The :obj:`bbox` coordinate values should be within 0-1000 range.\") from", "Slightly different from the TF version which uses truncated_normal for", "= nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings,", "from collections import namedtuple import logging from dataclasses import dataclass", "0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :,", "input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) else: text_embedding_output = self.embeddings( input_ids=input_ids,", "SkimformerSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size =", "context_layer class SkimformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() all_head_size = config.num_attention_heads", "eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward( self,", "encoder_outputs = self.encoder( text_embedding_output, skim_attention_output, head_mask=head_mask, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output", "else inputs_embeds.device if position_ids is None: position_ids = self.position_ids[:, :seq_length]", "__init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.skim_attention_head_size self.all_head_size", "hidden_states) return attention_output class SkimformerLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward", "None attentions: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None class", "if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(", "embeddings = inputs_embeds + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings =", "transformers.modeling_outputs import ( BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput, ) from transformers.models.bert.modeling_bert import", "= value def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None,", "find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers", "return attention_output class SkimformerLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward =", "elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias", "not None: bbox_shape = bbox.size() assert ( len(bbox_shape) == 3", "for downloading and loading pretrained models. 
\"\"\" config_class = BertWithSkimEmbedConfig", "to ``-100`` are ignored (masked), the loss is only computed", "of shape `[batch_size, sequence_length, 4]`, but got shape: {}\".format(bbox_shape) device", "the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This", "if input_ids is not None and inputs_embeds is not None:", "Prunes heads of the model. heads_to_prune: dict of {layer_num: list", "config self.embeddings = BertWithSkimEmbedEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config)", "== 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size,", "= self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads", "= self.dropout(embeddings) return embeddings class SkimAttention(nn.Module): def __init__(self, config): super().__init__()", "set to ``-100`` are ignored (masked), the loss is only", "store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size =", "num_hidden_layers=config.num_hidden_layers_layout_encoder, num_attention_heads=config.num_attention_heads_layout_encoder, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, hidden_dropout_prob=config.hidden_dropout_prob, attention_probs_dropout_prob=config.attention_probs_dropout_prob, max_position_embeddings=config.max_2d_position_embeddings, initializer_range=config.initializer_range, layer_norm_eps=config.layer_norm_eps, gradient_checkpointing=config.gradient_checkpointing,", "Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions = config.contextualize_2d_positions if self.contextualize_2d_positions: self.layout_encoder = BertEncoder( BertConfig(", "self.pruned_heads ) # Prune linear layers self.self.value = prune_linear_layer(self.self.value, index)", "attention_probs class SkimformerSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads", "for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward( self, input_ids=None,", "pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions,", "None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: token_type_ids", "def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the model. 
heads_to_prune:", "attention_output class SkimformerLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward", "BertOnlyMLMHead, ) from transformers.models.layoutlm.modeling_layoutlm import LayoutLMEmbeddings from .configuration_skim import (", "[None] * self.config.num_hidden_layers embedding_output = self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids,", "def __init__(self, config): super().__init__() self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.y_position_embeddings =", "= outputs + (skim_attention_output, ) if output_hidden_states: outputs = outputs", "with indices set to ``-100`` are ignored (masked), the loss", "or inputs_embeds\") assert ( len(input_shape) == 2 ), \"`input_ids` has", "input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) spatial_pos_embedding_output = self.two_dim_pos_embeddings(bbox=bbox) if", "head_mask, ) layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output )", "compute the weighted average in the self-attention heads. \"\"\" last_hidden_state:", "torch.zeros(input_shape, dtype=torch.long, device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids)", "None else None if not return_dict: return (sequence_output, pooled_output) +", "math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention", "add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def", "not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask", "range(config.num_hidden_layers)]) def forward( self, hidden_states, attention_probs, head_mask=None, output_hidden_states=False, return_dict=None, ):", "import nn from torch.nn import CrossEntropyLoss, LayerNorm from torch.autograd.function import", "seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if self.core_model_type == \"bert\": text_embedding_output", "inputs_embeds=inputs_embeds, ) else: text_embedding_output = self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids,", "skim_attention_mask = (1.0 - skim_attention_mask) * -10000.0 encoder_outputs = self.encoder(", "self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_layout_size, self.all_head_size) self.key = nn.Linear(config.hidden_layout_size,", "= None if labels is not None: loss_fct = CrossEntropyLoss()", "import ( BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput, ) from transformers.models.bert.modeling_bert import (", "SkimmingMaskConfig, ) logger = logging.getLogger(__name__) SkimformerEncoderOutput = namedtuple( \"SkimformerEncoderOutput\", [\"hidden_states\",", "has to be of shape `[batch_size, sequence_length, 4]`, but got", "for idx in range(num_attention_heads) if idx not in self.pruned_heads] attention_probs", "return position_embeddings class Skimformer2DPositionEmbeddings(nn.Module): \"\"\"Construct the layout embeddings from the", "from word, position and token_type embeddings.\"\"\" def __init__(self, config): super().__init__()", "self.pruned_heads.union(heads) def 
forward( self, hidden_states, attention_probs, head_mask=None, ): if len(self.pruned_heads)", "(1.0 - skim_attention_mask) * -10000.0 encoder_outputs = self.encoder( text_embedding_output, skim_attention_mask,", "return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SkimformerForMaskedLM(SkimformerPreTrainedModel):", "loading pretrained models. \"\"\" config_class = SkimformerConfig base_model_prefix = \"skimformer\"", "class SkimformerLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim", "config): super().__init__() self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)", "input_shape=input_shape, device=device, position_ids=position_ids, ) else: pos_embedding_output = self.two_dim_pos_embeddings( bbox=bbox, )", "loss. Indices should be in ``[0, ..., config.num_labels - 1]``.", "else: try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:,", "def forward( self, hidden_layout_states, attention_mask=None, ): key_layer = self.transpose_for_scores(self.key(hidden_layout_states)) query_layer", "* head_mask # Softmax(QK^T/sqrt(d)) . V context_layer = torch.matmul(attention_probs, value_layer)", "not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions,", "forward( self, hidden_states, attention_probs, head_mask=None, ): if len(self.pruned_heads) > 0:", "value): self.skimming_mask_model.embeddings.word_embeddings = value def forward( self, input_ids=None, bbox=None, attention_mask=None,", "outputs = self.bert_with_skim_embed( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds,", "device = input_ids.device else: input_shape = inputs_embeds.size()[:-1] device = inputs_embeds.device", "head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() ==", "is not None: loss_fct = CrossEntropyLoss() if attention_mask is not", "loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits,", "self.skimming_mask_model.embeddings.word_embeddings = value def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None,", ") ) self.skim_attention = SkimAttention(config) self.encoder = SkimformerEncoder(config) self.pooler =", "sequence (classification token) further processed by a Linear layer and", "self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder(", "the weighted average in the self-attention heads. \"\"\" last_hidden_state: torch.FloatTensor", "eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states =", "to compute the weighted average in the self-attention heads. 
\"\"\"", "is not None else None if not return_dict: outputs =", ") spatial_pos_embedding_output = self.two_dim_pos_embeddings(bbox=bbox) if self.contextualize_2d_positions: spatial_pos_embedding_output = self.layout_encoder(hidden_states=spatial_pos_embedding_output)[0] skim_attention_output", "used to compute the weighted average in the self-attention heads.", "of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case", "the model. ``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``,", "e embeddings = x_center_position_embeddings + y_center_position_embeddings else: try: left_position_embeddings =", "config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100``", "Skimformer1DPositionEmbeddings(nn.Module): \"\"\"Construct sequential position embeddings.\"\"\" def __init__(self, config): super().__init__() self.position_embeddings", "if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss,", "= nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward( self,", "V context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1,", "return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits =", ") if self.contextualize_2d_positions: pos_embedding_output = self.layout_encoder( hidden_states=pos_embedding_output, )[0] skim_attention_output =", "or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output", "self.text_embeddings.word_embeddings def set_input_embeddings(self, value): self.text_embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune):", "), \"`bbox` has to be of shape `[batch_size, sequence_length, 4]`,", "self.num_labels = config.num_labels self.bert_with_skim_embed = BertWithSkimEmbedModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob)", "left_position_embeddings = self.x_position_projection(left_position_embeddings) upper_position_embeddings = self.y_position_projection(upper_position_embeddings) right_position_embeddings = self.x_position_projection(right_position_embeddings) lower_position_embeddings", "= self.skimming_mask_model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions,", "we want to if head_mask is not None: attention_probs =", "nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size,", "( SkimformerConfig, BertWithSkimEmbedConfig, SkimmingMaskConfig, ) logger = logging.getLogger(__name__) SkimformerEncoderOutput =", "not None: input_shape = input_ids.size() batch_size, seq_length = input_shape elif", "def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.skimming_mask_model = SkimmingMaskModel(config,", "def forward( self, hidden_states, attention_probs, head_mask=None, ): if len(self.pruned_heads) >", "len(bbox_shape) == 3 ), \"`bbox` has to be of shape", "attention_probs, head_mask=None, ): value_layer = self.transpose_for_scores(self.value(hidden_states)) # Mask heads if", "device=device) if 
token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)", "super().__init__() self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_layout_size) self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")", "of shape `[batch_size, sequence_length]`, but got shape: {}\".format(input_shape) if bbox", "= config.use_1d_positions self.text_embeddings = SkimformerTextEmbeddings(config) if self.use_1d_positions: self.one_dim_pos_embeddings = Skimformer1DPositionEmbeddings(config)", "= torch.index_select(attention_probs, 1, indices) self_output = self.self( hidden_states, attention_probs, head_mask,", "= self.word_embeddings(input_ids) words_embeddings = inputs_embeds position_embeddings = self.position_embeddings(position_ids) try: left_position_embeddings", "self.token_type_embeddings(token_type_ids) embeddings = ( words_embeddings + position_embeddings + two_dim_pos_embeddings +", "None: input_shape = input_ids.size() batch_size, seq_length = input_shape elif inputs_embeds", "input_ids is not None and inputs_embeds is not None: raise", "else None, hidden_states=encoder_outputs.all_hidden_states, ) class BertWithSkimEmbedModel(BertWithSkimEmbedPreTrainedModel): def __init__(self, config, add_pooling_layer=True):", "self.self = SkimformerSelfAttention(config) self.output = SkimformerSelfOutput(config) self.pruned_heads = set() def", "inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) words_embeddings = inputs_embeds position_embeddings", "skim_attention_output = self.skim_attention( pos_embedding_output, attention_mask=extended_attention_mask, ) encoder_outputs = self.encoder( text_embedding_output,", "if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if", "self.degrade_2d_positions: try: x_center = (bbox[:, :, 0] + bbox[:, :,", "0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads", "BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead, ) from transformers.models.layoutlm.modeling_layoutlm import LayoutLMEmbeddings", "want to if head_mask is not None: attention_probs = attention_probs", "embeddings = self.dropout(embeddings) return embeddings class Skimformer1DPositionEmbeddings(nn.Module): \"\"\"Construct sequential position", "not None: module.bias.data.zero_() class BertWithSkimEmbedPreTrainedModel(PreTrainedModel): \"\"\" An abstract class to", "hidden_states: Optional[Tuple[torch.FloatTensor]] = None class SkimformerModel(SkimformerPreTrainedModel): def __init__(self, config, add_pooling_layer=True):", "self.x_position_projection(left_position_embeddings) upper_position_embeddings = self.y_position_projection(upper_position_embeddings) right_position_embeddings = self.x_position_projection(right_position_embeddings) lower_position_embeddings = self.y_position_projection(lower_position_embeddings)", "if attention_mask is not None: # Apply the attention mask", "IndexError as e: raise IndexError(\"The :obj:`bbox`coordinate values should be within", "return SkimformerModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, attentions=skim_attention_output if output_attentions else None, hidden_states=encoder_outputs.all_hidden_states,", "head_mask=None, ): if len(self.pruned_heads) > 0: num_attention_heads = attention_probs.shape[1] indices", 
"self.y_position_projection(upper_position_embeddings) right_position_embeddings = self.x_position_projection(right_position_embeddings) lower_position_embeddings = self.y_position_projection(lower_position_embeddings) h_position_embeddings = self.h_position_projection(h_position_embeddings)", "self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class SkimAttention(nn.Module): def __init__(self,", "output_hidden_states=None, return_dict=None, ): r\"\"\" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`,", "token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) else: text_embedding_output = self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids,", "+ position_embeddings + two_dim_pos_embeddings + token_type_embeddings ) embeddings = self.LayerNorm(embeddings)", "/ math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the", "from transformers.file_utils import ( ModelOutput, ) from transformers.modeling_utils import (", "= input_shape[1] if position_ids is None: position_ids = torch.arange( 0,", "= config.num_attention_heads self.attention_head_size = config.attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size", "self.LayerNorm = LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, bbox=None):", "self.config.use_return_dict outputs = self.bert_with_skim_embed( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask,", "``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the", "is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states,", "if labels is not None: loss_fct = CrossEntropyLoss() # -100", "module(*inputs) return custom_forward layer_output = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_probs, layer_head_mask,", "self.get_head_mask(head_mask, self.config.num_hidden_layers) if self.core_model_type == \"bert\": text_embedding_output = self.embeddings( input_ids=input_ids,", "dropping out entire tokens to attend to, which might #", "attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions", "attention_probs, head_mask=None, output_hidden_states=False, return_dict=None, ): all_hidden_states = () if output_hidden_states", "class SkimmingMaskForTokenClassification(SkimmingMaskPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"] def __init__(self, config): super().__init__(config) self.num_labels", "LayoutLMEmbeddings from .configuration_skim import ( SkimformerConfig, BertWithSkimEmbedConfig, SkimmingMaskConfig, ) logger", "raise ValueError(\"You have to specify either input_ids or inputs_embeds\") assert", "is not None: module.bias.data.zero_() @dataclass class SkimformerModelOutput(ModelOutput): \"\"\" Output type", "attention_probs = self.dropout(attention_probs) # return the attention probabilities only: Softmax(QK^T/sqrt(d))", "hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class SkimformerAttention(nn.Module): def", "__init__(self, config): super().__init__() all_head_size = config.num_attention_heads * config.attention_head_size 
self.dense =", "else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:]", "forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states", "SkimformerEncoderOutput( hidden_states=hidden_states, all_hidden_states=all_hidden_states, ) class SkimformerPreTrainedModel(PreTrainedModel): \"\"\" An abstract class", "if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads,", "namedtuple( \"SkimformerEncoderOutput\", [\"hidden_states\", \"all_hidden_states\"], ) class SkimformerTextEmbeddings(nn.Module): \"\"\"Construct the text", "input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None,", "self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class Skimformer1DPositionEmbeddings(nn.Module): \"\"\"Construct sequential", "# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]", ") attention_output = self.output(self_output, hidden_states) return attention_output class SkimformerLayer(nn.Module): def", "to if head_mask is not None: attention_probs = attention_probs *", "def get_input_embeddings(self): return self.skimming_mask_model.embeddings.word_embeddings def set_input_embeddings(self, value): self.skimming_mask_model.embeddings.word_embeddings = value", "output_attentions=None, output_hidden_states=None, return_dict=None, ): r\"\"\" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,", "context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class SkimformerSelfOutput(nn.Module):", "ourselves in which case we just need to make it", ":obj:`(batch_size, sequence_length, hidden_size)`. 
Hidden-states of the model at the output", "typing import Optional, Tuple import math import torch from torch", "= self.layout_encoder(hidden_states=spatial_pos_embedding_output)[0] skim_attention_output = self.skim_attention( spatial_pos_embedding_output, attention_mask=extended_attention_mask, ) topk_idx =", "= self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1]) w_position_embeddings =", "w_position_embeddings ) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = ( words_embeddings +", "Take the dot product between \"query\" and \"key\" to get", "(:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):", "else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states", "= torch.zeros(input_shape, dtype=torch.long, device=device) if bbox is None: bbox =", "config.contextualize_2d_positions if self.contextualize_2d_positions: self.layout_encoder = BertEncoder( BertConfig( hidden_size=config.hidden_layout_size, num_hidden_layers=config.num_hidden_layers_layout_encoder, num_attention_heads=config.num_attention_heads_layout_encoder,", "= self.position_embeddings(position_ids) position_embeddings = self.LayerNorm(position_embeddings) position_embeddings = self.dropout(position_embeddings) return position_embeddings", "= input_shape[1] device = input_ids.device if input_ids is not None", "None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds", "+ upper_position_embeddings + right_position_embeddings + lower_position_embeddings + h_position_embeddings + w_position_embeddings", "in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask", "a Linear layer and a Tanh activation function. 
The Linear", "\"bertwithskimembed\" _keys_to_ignore_on_load_missing = [r\"position_ids\"] def _init_weights(self, module): \"\"\" Initialize the", "else None if getattr(self.config, \"gradient_checkpointing\", False): def create_custom_forward(module): def custom_forward(*inputs):", "SkimAttention(config) self.top_k = config.top_k self.encoder = BertEncoder(config) self.pooler = BertPooler(config)", "= nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward( self, input_ids=None, bbox=None, attention_mask=None,", "heads to prune in this layer} See base class PreTrainedModel", "of heads to prune in this layer} See base class", "= CrossEntropyLoss() # Only keep active parts of the loss", "self.use_1d_positions = config.use_1d_positions self.text_embeddings = SkimformerTextEmbeddings(config) if self.use_1d_positions: self.one_dim_pos_embeddings =", "self.skimming_mask_model = SkimmingMaskModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size,", "= torch.zeros(skim_attention_output.shape, device=device) skim_attention_mask = skim_attention_mask.scatter(-1, topk_idx, 1) skim_attention_mask =", "= 1 self.attention = SkimformerAttention(config) self.intermediate = BertIntermediate(config) self.output =", "bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device) # We can", "position_ids=position_ids, ) else: pos_embedding_output = self.two_dim_pos_embeddings( bbox=bbox, ) if self.contextualize_2d_positions:", ") class SkimformerForMaskedLM(SkimformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"] _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]", "input_ids is not None: input_shape = input_ids.size() device = input_ids.device", "if input_ids is not None else inputs_embeds.device if token_type_ids is", "self, hidden_states, attention_probs, head_mask=None, ): attention_output = self.attention( hidden_states, attention_probs,", "inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if", "SkimformerModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights()", "__init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([SkimformerLayer(config) for", "outputs = outputs + (skim_attention_output, ) if output_hidden_states: outputs =", "class BertWithSkimEmbedPreTrainedModel(PreTrainedModel): \"\"\" An abstract class to handle weights initialization", "None: bbox_shape = bbox.size() assert ( len(bbox_shape) == 3 ),", "self.x_position_embeddings(x_center) y_center_position_embeddings = self.y_position_embeddings(y_center) except IndexError as e: raise IndexError(\"The", "if head_mask is not None: if head_mask.dim() == 1: head_mask", "output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class BertWithSkimEmbedForTokenClassification(BertWithSkimEmbedPreTrainedModel):", "SkimformerTextEmbeddings(config) if self.use_1d_positions: self.one_dim_pos_embeddings = Skimformer1DPositionEmbeddings(config) else: self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config)", "= nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor):", "= 
config.skim_attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_layout_size,", "-10000.0 if head_mask is not None: if head_mask.dim() == 1:", "when ``config.output_attentions=True``): :obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions", "else: head_mask = [None] * self.config.num_hidden_layers embedding_output = self.embeddings( input_ids=input_ids,", "shape `[batch_size, sequence_length, 4]`, but got shape: {}\".format(bbox_shape) device =", "from word and token_type embeddings.\"\"\" def __init__(self, config): super().__init__() self.max_position_embeddings", "self.x_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.y_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.h_position_projection =", "embeddings.\"\"\" def __init__(self, config): super().__init__() self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_layout_size) self.position_embedding_type", "indices = [idx for idx in range(num_attention_heads) if idx not", "try: x_center = (bbox[:, :, 0] + bbox[:, :, 2])", "return_dict if return_dict is not None else self.config.use_return_dict outputs =", "token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) spatial_pos_embedding_output = self.two_dim_pos_embeddings(bbox=bbox) if self.contextualize_2d_positions: spatial_pos_embedding_output =", "= prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update", "attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids =", "{}\".format(input_shape) if bbox is not None: bbox_shape = bbox.size() assert", "of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked", "inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if", "is not None: loss_fct = CrossEntropyLoss() # Only keep active", "= inputs_embeds.size()[:-1] batch_size, seq_length = input_shape else: raise ValueError(\"You have", "def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): \"\"\"", "return attention_probs class SkimformerSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads =", "token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if bbox is None: bbox", "is not None else inputs_embeds.device if position_ids is None: position_ids", "layer of the model. ``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping``", "= (bbox[:, :, 1] + bbox[:, :, 3]) // 2", "if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF", "each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`,", "if attention_mask is not None: active_loss = attention_mask.view(-1) == 1", "to handle weights initialization and a simple interface for downloading", "encoder_outputs[1:] return outputs return SkimformerModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, attentions=skim_attention_output if output_attentions", "same time\") elif input_ids is not None: input_shape = input_ids.size()", "hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class SkimformerForTokenClassification(SkimformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"] def __init__(self,", "of the sequence (classification token) further processed by a Linear", "Mask heads if we want to if head_mask is not", "= self.y_position_embeddings(y_center) except IndexError as e: raise IndexError(\"The :obj:`bbox` coordinate", ") ) self.skim_attention = SkimAttention(config) self.top_k = config.top_k self.encoder =", ") loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels),", "0]) # project into same dimension as text embeddings left_position_embeddings", "has shape bsz x n_heads x N x N #", "attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when", "within 0-1000 range.\") from e embeddings = x_center_position_embeddings + y_center_position_embeddings", "\"key\" to get the raw attention scores. attention_scores = torch.matmul(query_layer,", "None if labels is not None: loss_fct = CrossEntropyLoss() #", "heads if we want to if head_mask is not None:", "and a Tanh activation function. The Linear layer weights are", "ModelOutput, ) from transformers.modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer,", "= self.dropout(position_embeddings) return position_embeddings class Skimformer2DPositionEmbeddings(nn.Module): \"\"\"Construct the layout embeddings", "else: pos_embedding_output = self.two_dim_pos_embeddings( bbox=bbox, ) if self.contextualize_2d_positions: pos_embedding_output =", "self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states,", "if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class", "[\"hidden_states\", \"all_hidden_states\"], ) class SkimformerTextEmbeddings(nn.Module): \"\"\"Construct the text embeddings from", ") position_ids = position_ids.unsqueeze(0).expand(input_shape) position_embeddings = self.position_embeddings(position_ids) position_embeddings = self.LayerNorm(position_embeddings)", "self.key = nn.Linear(config.hidden_layout_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config,", "v for v in [ hidden_states, all_hidden_states, ] if v", "# Apply the attention mask is (precomputed for all layers", "): attention_output = self.attention( hidden_states, attention_probs, head_mask, ) layer_output =", "self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_shape, device, position_ids=None): seq_length =", "): key_layer = self.transpose_for_scores(self.key(hidden_layout_states)) query_layer = self.transpose_for_scores(self.query(hidden_layout_states)) # Take the", "the tokens with labels in ``[0, ..., config.vocab_size]`` \"\"\" return_dict", "None, :, :] skim_attention_mask = 
(1.0 - skim_attention_mask) * -10000.0", "nn.Linear(config.hidden_layout_size, config.hidden_size) self.y_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.h_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size)", "if getattr(self.config, \"gradient_checkpointing\", False): def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs)", "head mask if needed # 1.0 in head_mask indicate we", "forward(self, bbox=None): if self.degrade_2d_positions: try: x_center = (bbox[:, :, 0]", "(precomputed for all layers in BertModel forward() function) attention_scores =", "None else None if not return_dict: outputs = (sequence_output, pooled_output)", "position_embeddings = self.LayerNorm(position_embeddings) position_embeddings = self.dropout(position_embeddings) return position_embeddings class Skimformer2DPositionEmbeddings(nn.Module):", "def set_input_embeddings(self, value): self.skimming_mask_model.embeddings.word_embeddings = value def forward( self, input_ids=None,", "+ token_type_embeddings ) embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return", "if return_dict is not None else self.config.use_return_dict outputs = self.skimming_mask_model(", "output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output)", "= self.bert_with_skim_embed( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions,", "is None: bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device) extended_attention_mask", "self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\") self.LayerNorm = LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps) self.dropout", "None: position_ids = torch.arange( 0, seq_length, dtype=torch.long, device=device ) position_ids", "attention_output) return layer_output class SkimformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config", "= apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) return layer_output def", "..., config.vocab_size]`` \"\"\" return_dict = return_dict if return_dict is not", "__init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.attention_head_size self.all_head_size", "output = (logits,) + outputs[2:] return ((loss,) + output) if", "should be within 0-1000 range.\") from e embeddings = x_center_position_embeddings", "= config.max_position_embeddings self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.token_type_embeddings = nn.Embedding(config.type_vocab_size,", "in self.pruned_heads] attention_probs = torch.index_select(attention_probs, 1, indices) self_output = self.self(", "= BertWithSkimEmbedConfig base_model_prefix = \"bertwithskimembed\" _keys_to_ignore_on_load_missing = [r\"position_ids\"] def _init_weights(self,", "attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SkimformerForMaskedLM(SkimformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"] _keys_to_ignore_on_load_missing =", "= logging.getLogger(__name__) 
SkimformerEncoderOutput = namedtuple( \"SkimformerEncoderOutput\", [\"hidden_states\", \"all_hidden_states\"], ) class", "(see ``input_ids`` docstring) Tokens with indices set to ``-100`` are", "(classification) objective during pretraining. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``", ") from transformers.models.layoutlm.modeling_layoutlm import LayoutLMEmbeddings from .configuration_skim import ( SkimformerConfig,", "have to specify either input_ids or inputs_embeds\") assert ( len(input_shape)", ") class BertWithSkimEmbedForTokenClassification(BertWithSkimEmbedPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"] def __init__(self, config): super().__init__(config)", "layer_norm_eps=config.layer_norm_eps, gradient_checkpointing=config.gradient_checkpointing, ) ) self.skim_attention = SkimAttention(config) self.encoder = SkimformerEncoder(config)", "* config.attention_head_size self.dense = nn.Linear(all_head_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)", "self.config.use_return_dict if input_ids is not None and inputs_embeds is not", "the self-attention heads. \"\"\" last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor", "- bbox[:, :, 0]) # project into same dimension as", "= self.transpose_for_scores(self.key(hidden_layout_states)) query_layer = self.transpose_for_scores(self.query(hidden_layout_states)) # Take the dot product", "[ hidden_states, all_hidden_states, ] if v is not None )", "len(input_shape) == 2 ), \"`input_ids` has to be of shape", "position_embeddings + two_dim_pos_embeddings + token_type_embeddings ) embeddings = self.LayerNorm(embeddings) embeddings", "LayoutLMEmbeddings(config) self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions = config.contextualize_2d_positions if config.contextualize_2d_positions: self.layout_encoder", "labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r\"\"\" labels (:obj:`torch.LongTensor` of shape", "= self.get_extended_attention_mask(attention_mask, input_shape, device) # Prepare head mask if needed", "= self.position_embeddings(position_ids) try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings =", "\"skimmingmask\" _keys_to_ignore_on_load_missing = [r\"position_ids\"] def _init_weights(self, module): \"\"\" Initialize the", "gradient_checkpointing=config.gradient_checkpointing, ) ) self.skim_attention = SkimAttention(config) self.top_k = config.top_k self.encoder", ") class SkimformerTextEmbeddings(nn.Module): \"\"\"Construct the text embeddings from word and", "base class PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads)", "except IndexError as e: raise IndexError(\"The :obj:`bbox`coordinate values should be", "= BertIntermediate(config) self.output = BertOutput(config) def forward( self, hidden_states, attention_probs,", "= torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_probs, layer_head_mask, ) else: layer_output =", "sequence_length)`, `optional`): Labels for computing the token classification loss. 
Indices", "values should be within 0-1000 range.\") from e h_position_embeddings =", "= output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states", "= \"skimformer\" def _init_weights(self, module): \"\"\" Initialize the weights \"\"\"", "right_position_embeddings + lower_position_embeddings + h_position_embeddings + w_position_embeddings ) embeddings =", "output) if loss is not None else output return TokenClassifierOutput(", "scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores /", "+ (skim_attention_output, ) if output_hidden_states: outputs = outputs + encoder_outputs[1:]", "= SkimmingMaskModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels)", "token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = ( words_embeddings + position_embeddings +", "and module.bias is not None: module.bias.data.zero_() @dataclass class SkimformerModelOutput(ModelOutput): \"\"\"", "`[batch_size, sequence_length, 4]`, but got shape: {}\".format(bbox_shape) device = input_ids.device", "head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.to(dtype=next(self.parameters()).dtype) else: head_mask = [None]", "loss is only computed for the tokens with labels in", "token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if inputs_embeds", "new_embeddings): self.cls.predictions.decoder = new_embeddings def forward( self, input_ids=None, bbox=None, attention_mask=None,", "self, hidden_states, attention_probs, head_mask=None, output_hidden_states=False, return_dict=None, ): all_hidden_states = ()", "token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output =", "\"bert\": text_embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) else:", ":, 1] + bbox[:, :, 3]) // 2 x_center_position_embeddings =", "+ w_position_embeddings ) embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return", "lower_position_embeddings = self.y_position_projection(lower_position_embeddings) h_position_embeddings = self.h_position_projection(h_position_embeddings) w_position_embeddings = self.w_position_projection(w_position_embeddings) two_dim_pos_embeddings", "return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss =", "super().__init__() self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.h_position_embeddings", "input_ids is not None: input_shape = input_ids.size() else: input_shape =", "token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions,", "x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) text_embedding_output =", "config.num_labels - 1]``. 
\"\"\" return_dict = return_dict if return_dict is", "inputs_embeds position_embeddings = self.position_embeddings(position_ids) try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])", "the attention softmax, used to compute the weighted average in", "set_input_embeddings(self, value): self.skimming_mask_model.embeddings.word_embeddings = value def forward( self, input_ids=None, bbox=None,", "skim_attention_mask.scatter(-1, topk_idx, 1) skim_attention_mask = skim_attention_mask * attention_mask[:, None, :,", "3]) except IndexError as e: raise IndexError(\"The :obj:`bbox` coordinate values", "at the output of each layer plus the initial embedding", "attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None:", "broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)", "the first token of the sequence (classification token) further processed", "else self.config.use_return_dict outputs = self.bert_with_skim_embed( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids,", "``target_mapping`` is ``None``, then ``num_predict`` corresponds to ``sequence_length``. pooler_output (:obj:`torch.FloatTensor`", "if not return_dict: outputs = (sequence_output, pooled_output) if output_attentions: outputs", "paper. attention_probs = self.dropout(attention_probs) # return the attention probabilities only:", "None else self.config.use_return_dict outputs = self.skimming_mask_model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids,", "= self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except", "not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output,", "self.embeddings = BertEmbeddings(config) if self.core_model_type == \"bert\" else LayoutLMEmbeddings(config) self.two_dim_pos_embeddings", "* self.attention_head_size self.query = nn.Linear(config.hidden_layout_size, self.all_head_size) self.key = nn.Linear(config.hidden_layout_size, self.all_head_size)", "layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. 
Hidden-states of the model", "SkimformerSelfAttention(config) self.output = SkimformerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads):", "dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we", "config.num_labels) self.init_weights() def get_input_embeddings(self): return self.skimming_mask_model.embeddings.word_embeddings def set_input_embeddings(self, value): self.skimming_mask_model.embeddings.word_embeddings", "position_embeddings = self.dropout(position_embeddings) return position_embeddings class Skimformer2DPositionEmbeddings(nn.Module): \"\"\"Construct the layout", "get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def", "LayerNorm from torch.autograd.function import Function from transformers.file_utils import ( ModelOutput,", "inputs_embeds.size()[:-1] batch_size, seq_length = input_shape else: raise ValueError(\"You have to", "input_ids=None, token_type_ids=None, inputs_embeds=None): if input_ids is not None: input_shape =", "self.LayerNorm(position_embeddings) position_embeddings = self.dropout(position_embeddings) return position_embeddings class Skimformer2DPositionEmbeddings(nn.Module): \"\"\"Construct the", "embeddings = ( left_position_embeddings + upper_position_embeddings + right_position_embeddings + lower_position_embeddings", "the TF version which uses truncated_normal for initialization # cf", "None: raise ValueError(\"You cannot specify both input_ids and inputs_embeds at", "hidden_states=hidden_states, all_hidden_states=all_hidden_states, ) class SkimformerPreTrainedModel(PreTrainedModel): \"\"\" An abstract class to", "activation function. The Linear layer weights are trained from the", "self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward( self, input_ids=None,", "to make it broadcastable to all heads. extended_attention_mask: torch.Tensor =", "= nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def get_input_embeddings(self): return self.bert_with_skim_embed.embeddings.word_embeddings def set_input_embeddings(self,", "is (precomputed for all layers in BertModel forward() function) attention_scores", "super().__init__(config) self.num_labels = config.num_labels self.bert_with_skim_embed = BertWithSkimEmbedModel(config, add_pooling_layer=False) self.dropout =", "output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return", "and loading pretrained models. 
\"\"\" config_class = SkimformerConfig base_model_prefix =", "def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output)", "self.transpose_for_scores(self.key(hidden_layout_states)) query_layer = self.transpose_for_scores(self.query(hidden_layout_states)) # Take the dot product between", "input_shape = input_ids.size() elif inputs_embeds is not None: input_shape =", "loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output =", "torch import nn from torch.nn import CrossEntropyLoss, LayerNorm from torch.autograd.function", "(bbox[:, :, 1] + bbox[:, :, 3]) // 2 x_center_position_embeddings", "class SkimformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() all_head_size = config.num_attention_heads *", "nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to", "head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if", "range.\") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:,", "Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions = config.contextualize_2d_positions if config.contextualize_2d_positions: self.layout_encoder = BertEncoder( BertConfig(", "or [num_hidden_layers x num_heads] # and head_mask is converted to", "head_mask is not None: if head_mask.dim() == 1: head_mask =", "keep the head # attention_probs has shape bsz x n_heads", "prune in this layer} See base class PreTrainedModel \"\"\" for", "self.config.use_return_dict outputs = self.skimformer( input_ids, bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask,", "average in the self-attention heads. \"\"\" last_hidden_state: torch.FloatTensor = None", "and inputs_embeds at the same time\") elif input_ids is not", "self.seq_len_dim, attention_output ) return layer_output def feed_forward_chunk(self, attention_output): intermediate_output =", "all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask", "= logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) )", "be in ``[0, ..., config.num_labels - 1]``. \"\"\" return_dict =", "BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead, ) from transformers.models.layoutlm.modeling_layoutlm import", "is not None else None if getattr(self.config, \"gradient_checkpointing\", False): def", "model. 
heads_to_prune: dict of {layer_num: list of heads to prune", "0, seq_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).expand(input_shape) position_embeddings =", "nn.Linear(config.hidden_layout_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")", ":, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except IndexError as", "be of shape `[batch_size, sequence_length]`, but got shape: {}\".format(input_shape) if", "nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states", "bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )", "output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class SkimformerForTokenClassification(SkimformerPreTrainedModel):", "bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )", "self.init_weights() def get_input_embeddings(self): return self.bert_with_skim_embed.embeddings.word_embeddings def set_input_embeddings(self, value): self.bert_with_skim_embed.embeddings.word_embeddings =", "= self.LayerNorm(position_embeddings) position_embeddings = self.dropout(position_embeddings) return position_embeddings class Skimformer2DPositionEmbeddings(nn.Module): \"\"\"Construct", ":obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax,", "self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings =", "return SkimformerEncoderOutput( hidden_states=hidden_states, all_hidden_states=all_hidden_states, ) class SkimformerPreTrainedModel(PreTrainedModel): \"\"\" An abstract", "dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).expand(input_shape) position_embeddings = self.position_embeddings(position_ids) position_embeddings", "for downloading and loading pretrained models. \"\"\" config_class = SkimmingMaskConfig", "= torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if", "== 3 ), \"`bbox` has to be of shape `[batch_size,", "@dataclass class SkimformerModelOutput(ModelOutput): \"\"\" Output type of :class:`~SkimformerModel`. 
Args: last_hidden_state", "config.hidden_size) self.h_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.w_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.LayerNorm", "is not None else self.config.output_hidden_states ) return_dict = return_dict if", "None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape,", "self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.y_position_embeddings =", "SkimformerModelOutput(ModelOutput): \"\"\" Output type of :class:`~SkimformerModel`. Args: last_hidden_state (:obj:`torch.FloatTensor` of", "sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to", "outputs = self.skimming_mask_model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds,", "self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) spatial_pos_embedding_output = self.two_dim_pos_embeddings(bbox=bbox)", "logging.getLogger(__name__) SkimformerEncoderOutput = namedtuple( \"SkimformerEncoderOutput\", [\"hidden_states\", \"all_hidden_states\"], ) class SkimformerTextEmbeddings(nn.Module):", "head # attention_probs has shape bsz x n_heads x N", "position_embeddings = self.position_embeddings(position_ids) position_embeddings = self.LayerNorm(position_embeddings) position_embeddings = self.dropout(position_embeddings) return", "labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss =", "return_dict = return_dict if return_dict is not None else self.config.use_return_dict", ":obj:`bbox` coordinate values should be within 0-1000 range.\") from e", "self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_layout_size, self.all_head_size) self.key", "not None else None if not return_dict: return (sequence_output, pooled_output)", "+ attention_mask # Normalize the attention scores to probabilities. 
attention_probs", "3]) // 2 x_center_position_embeddings = self.x_position_embeddings(x_center) y_center_position_embeddings = self.y_position_embeddings(y_center) except", "outputs[2:] return ((loss,) + output) if loss is not None", "BertWithSkimEmbedPreTrainedModel(PreTrainedModel): \"\"\" An abstract class to handle weights initialization and", "if labels is not None: loss_fct = CrossEntropyLoss() # Only", "return_dict is not None else self.config.use_return_dict if input_ids is not", "weights initialization and a simple interface for downloading and loading", "class SkimformerAttention(nn.Module): def __init__(self, config): super().__init__() self.self = SkimformerSelfAttention(config) self.output", ") sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is", "= LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_shape, device,", "x_center_position_embeddings = self.x_position_embeddings(x_center) y_center_position_embeddings = self.y_position_embeddings(y_center) except IndexError as e:", "self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))", "None: input_shape = input_ids.size() device = input_ids.device else: input_shape =", "models. \"\"\" config_class = SkimformerConfig base_model_prefix = \"skimformer\" def _init_weights(self,", "attention_probs, head_mask, ) attention_output = self.output(self_output, hidden_states) return attention_output class", "(:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at", "= BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None self.init_weights()", "to shape [num_hidden_layers x batch x num_heads x seq_length x", "\"skimformer\" def _init_weights(self, module): \"\"\" Initialize the weights \"\"\" if", "None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None:", "== 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.to(dtype=next(self.parameters()).dtype) else: head_mask", "output_attentions else None, hidden_states=encoder_outputs.all_hidden_states, ) class BertWithSkimEmbedModel(BertWithSkimEmbedPreTrainedModel): def __init__(self, config,", "+ input_tensor) return hidden_states class SkimformerAttention(nn.Module): def __init__(self, config): super().__init__()", "+ outputs[2:] return ((loss,) + output) if loss is not", "\"gradient_checkpointing\", False): def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward", "hidden-state of the first token of the sequence (classification token)", "BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead, ) from transformers.models.layoutlm.modeling_layoutlm import LayoutLMEmbeddings from", "not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,", "self.skim_attention( pos_embedding_output, attention_mask=extended_attention_mask, ) encoder_outputs = self.encoder( text_embedding_output, skim_attention_output, head_mask=head_mask,", "self.dropout(embeddings) return embeddings class SkimAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads", "return 
TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class SkimmingMaskForTokenClassification(SkimmingMaskPreTrainedModel): _keys_to_ignore_on_load_unexpected", "\"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward( self,", "= self.num_attention_heads * self.attention_head_size self.value = nn.Linear(config.hidden_size, self.all_head_size) def transpose_for_scores(self,", "= BertWithSkimEmbedEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer", "= self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class Skimformer1DPositionEmbeddings(nn.Module): \"\"\"Construct", "skim_attention_mask = skim_attention_mask.scatter(-1, topk_idx, 1) skim_attention_mask = skim_attention_mask * attention_mask[:,", "def __init__(self, config): super().__init__() self.self = SkimformerSelfAttention(config) self.output = SkimformerSelfOutput(config)", "(torch.sum(attention_mask, dim=-1)).view(-1) > 0 else: active_loss = attention_mask.view(-1) == 1", "import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.modeling_outputs import", "masked language modeling loss. Indices should be in ``[-100, 0,", "input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states", "None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )", "= input_ids.device else: input_shape = inputs_embeds.size()[:-1] device = inputs_embeds.device device", "2 y_center = (bbox[:, :, 1] + bbox[:, :, 3])", "None if getattr(self.config, \"gradient_checkpointing\", False): def create_custom_forward(module): def custom_forward(*inputs): return", "config): super().__init__() self.config = config self.layer = nn.ModuleList([SkimformerLayer(config) for _", "``[0, ..., config.vocab_size]`` \"\"\" return_dict = return_dict if return_dict is", "x N # input head_mask has shape [num_heads] or [num_hidden_layers", "self.embeddings = BertWithSkimEmbedEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if", "return_dict is not None else self.config.use_return_dict outputs = self.skimming_mask_model( input_ids=input_ids,", "input_ids is not None: input_shape = input_ids.size() batch_size, seq_length =", "bounding box coordinates.\"\"\" def __init__(self, config): super().__init__() self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings,", "shape :obj:`(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last", "self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout =", "= self.output(intermediate_output, attention_output) return layer_output class SkimformerEncoder(nn.Module): def __init__(self, config):", "CrossEntropyLoss() if attention_mask is not None: active_loss = attention_mask.view(-1) ==", "[r\"pooler\"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.skimformer =", "heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) # Prepare head", "self.classifier(sequence_output) loss = None if labels is not None: loss_fct", "import LayoutLMEmbeddings from .configuration_skim import ( SkimformerConfig, BertWithSkimEmbedConfig, SkimmingMaskConfig, )", "inputs_embeds + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return", "device) # Prepare head mask if needed # 1.0 in", "output_hidden_states else None for i, layer_module in enumerate(self.layer): if output_hidden_states:", "docstring) Tokens with indices set to ``-100`` are ignored (masked),", "nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def", "( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.modeling_outputs import (", "Prune linear layers self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense,", "from torch import nn from torch.nn import CrossEntropyLoss, LayerNorm from", "nn.Linear(config.hidden_layout_size, config.hidden_size) self.w_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)", "bbox=None, token_type_ids=None, position_ids=None, inputs_embeds=None, ): if input_ids is not None:", "[r\"position_ids\", r\"predictions.decoder.bias\"] def __init__(self, config): super().__init__(config) self.skimformer = SkimformerModel(config, add_pooling_layer=False)", "is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None:", "but got shape: {}\".format(bbox_shape) device = input_ids.device if input_ids is", "{layer_num: list of heads to prune in this layer} See", "if needed # 1.0 in head_mask indicate we keep the", "1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1,", "token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output", ") topk_idx = torch.topk(skim_attention_output, self.top_k, -1).indices skim_attention_mask = torch.zeros(skim_attention_output.shape, device=device)", "= (logits,) + outputs[2:] return ((loss,) + output) if loss", "we keep the head # attention_probs has shape bsz x", "self.cls = BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self,", "self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.x_position_projection =", "super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.skim_attention_head_size self.all_head_size = self.num_attention_heads", "self.skimformer = SkimformerModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size,", "if config.contextualize_2d_positions: self.layout_encoder = BertEncoder( BertConfig( hidden_size=config.hidden_layout_size, num_hidden_layers=config.num_hidden_layers_layout_encoder, 
num_attention_heads=config.num_attention_heads_layout_encoder, intermediate_size=config.intermediate_size,", "\"\"\" Initialize the weights \"\"\" if isinstance(module, (nn.Linear, nn.Embedding)): #", "(hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else", "bsz x n_heads x N x N # input head_mask", "forward() function) attention_scores = attention_scores + attention_mask # Normalize the", "returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): :obj:`torch.FloatTensor` of", "CrossEntropyLoss, LayerNorm from torch.autograd.function import Function from transformers.file_utils import (", "value def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the model.", "right_position_embeddings + lower_position_embeddings + h_position_embeddings + w_position_embeddings ) token_type_embeddings =", "mask is (precomputed for all layers in BertModel forward() function)", "embeddings.\"\"\" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)", "heads of the model. heads_to_prune: dict of {layer_num: list of", "to be of shape `[batch_size, sequence_length, 4]`, but got shape:", "self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss", ":, 3]) except IndexError as e: raise IndexError(\"The :obj:`bbox`coordinate values", "= skim_attention_mask * attention_mask[:, None, :, :] skim_attention_mask = (1.0", "isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is", ":, 0] + bbox[:, :, 2]) // 2 y_center =", "is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None:", "= SkimformerSelfAttention(config) self.output = SkimformerSelfOutput(config) self.pruned_heads = set() def prune_heads(self,", "= getattr(config, \"position_embedding_type\", \"absolute\") self.LayerNorm = LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps) self.dropout =", "None else inputs_embeds.device if token_type_ids is None: token_type_ids = torch.zeros(input_shape,", "active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels", "logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class SkimformerForTokenClassification(SkimformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"] def", "config.top_k self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else", "in this layer} See base class PreTrainedModel \"\"\" for layer,", "left_position_embeddings + upper_position_embeddings + right_position_embeddings + lower_position_embeddings + h_position_embeddings +", "SkimformerLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim =", "extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) # Prepare head mask", "of the first token of the sequence (classification token) further", ") embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class", "1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])", "bbox[:, :, 2]) // 2 y_center = (bbox[:, :, 1]", "not None: loss_fct = CrossEntropyLoss() # -100 index = padding", 
"except IndexError as e: raise IndexError(\"The :obj:`bbox` coordinate values should", "1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape)", "pooler_output: torch.FloatTensor = None attentions: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]]", "CrossEntropyLoss() if attention_mask is not None: if attention_mask.dim() == 3:", "is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs)", ") else: pos_embedding_output = self.two_dim_pos_embeddings( bbox=bbox, ) if self.contextualize_2d_positions: pos_embedding_output", "BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SkimmingMaskModel(SkimmingMaskPreTrainedModel): def", "modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]``", "= self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings", "config.hidden_size) self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.h_position_embeddings", "add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = BertWithSkimEmbedEmbeddings(config) self.encoder =", "config): super().__init__() self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_layout_size) self.position_embedding_type = getattr(config, \"position_embedding_type\",", "-100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))", "base_model_prefix = \"bertwithskimembed\" _keys_to_ignore_on_load_missing = [r\"position_ids\"] def _init_weights(self, module): \"\"\"", "weights are trained from the next sentence prediction (classification) objective", "range.\") from e embeddings = x_center_position_embeddings + y_center_position_embeddings else: try:", "pos_embedding_output = self.one_dim_pos_embeddings( input_shape=input_shape, device=device, position_ids=position_ids, ) else: pos_embedding_output =", "self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions = config.contextualize_2d_positions if config.contextualize_2d_positions: self.layout_encoder =", "def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert_with_skim_embed = BertWithSkimEmbedModel(config,", "config self.layer = nn.ModuleList([SkimformerLayer(config) for _ in range(config.num_hidden_layers)]) def forward(", "hidden_size)`): Sequence of hidden-states at the last layer of the", "head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.to(dtype=next(self.parameters()).dtype) else: head_mask = [None] * self.config.num_hidden_layers", "layer_output = layer_module( hidden_states, attention_probs, layer_head_mask, ) hidden_states = layer_output", "self.layout_encoder( hidden_states=pos_embedding_output, )[0] skim_attention_output = self.skim_attention( pos_embedding_output, attention_mask=extended_attention_mask, ) encoder_outputs", "namedtuple import logging from dataclasses import dataclass from typing import", 
"config, add_pooling_layer=True): super().__init__(config) self.config = config self.use_1d_positions = config.use_1d_positions self.text_embeddings", "config): super().__init__() self.self = SkimformerSelfAttention(config) self.output = SkimformerSelfOutput(config) self.pruned_heads =", "BertConfig( hidden_size=config.hidden_layout_size, num_hidden_layers=config.num_hidden_layers_layout_encoder, num_attention_heads=config.num_attention_heads_layout_encoder, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, hidden_dropout_prob=config.hidden_dropout_prob, attention_probs_dropout_prob=config.attention_probs_dropout_prob, max_position_embeddings=config.max_2d_position_embeddings, initializer_range=config.initializer_range,", "_ in range(config.num_hidden_layers)]) def forward( self, hidden_states, attention_probs, head_mask=None, output_hidden_states=False,", "and \"key\" to get the raw attention scores. attention_scores =", "embeddings class SkimAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads", "self.num_labels = config.num_labels self.skimformer = SkimformerModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob)", "MaskedLMOutput, TokenClassifierOutput, ) from transformers.models.bert.modeling_bert import ( BertConfig, BertEmbeddings, BertIntermediate,", "1) skim_attention_mask = skim_attention_mask * attention_mask[:, None, :, :] skim_attention_mask", "self.attention_head_size self.value = nn.Linear(config.hidden_size, self.all_head_size) def transpose_for_scores(self, x): new_x_shape =", "get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))", "self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and", "position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions", "hidden_size)`): Last layer hidden-state of the first token of the", "None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if bbox is None:", "def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings", "= self.x_position_embeddings(x_center) y_center_position_embeddings = self.y_position_embeddings(y_center) except IndexError as e: raise", "attention_mask.dim() == 3: active_loss = (torch.sum(attention_mask, dim=-1)).view(-1) > 0 else:", "((loss,) + output) if loss is not None else output", "dtype=torch.long, device=device) # We can provide a self-attention mask of", "== 2 ), \"`input_ids` has to be of shape `[batch_size,", "= ( words_embeddings + position_embeddings + two_dim_pos_embeddings + token_type_embeddings )", "to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) #", "input_ids is not None else inputs_embeds.device if token_type_ids is None:", "# ourselves in which case we just need to make", "return_dict is not None else self.config.use_return_dict outputs = self.bert_with_skim_embed( input_ids=input_ids,", "None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError(\"You have to specify", "= self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings embeddings = self.LayerNorm(embeddings)", "= inputs_embeds position_embeddings = self.position_embeddings(position_ids) try: left_position_embeddings = self.x_position_embeddings(bbox[:, :,", "version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0,", "= [r\"pooler\"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.skimming_mask_model", "heads_to_prune: dict of {layer_num: list of heads to prune in", "self.position_embeddings(position_ids) position_embeddings = self.LayerNorm(position_embeddings) position_embeddings = self.dropout(position_embeddings) return position_embeddings class", "it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,", "and head_mask is converted to shape [num_hidden_layers x batch x", "which might # seem a bit unusual, but is taken", "* self.attention_head_size self.value = nn.Linear(config.hidden_size, self.all_head_size) def transpose_for_scores(self, x): new_x_shape", "heads. \"\"\" last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor = None", "-1).indices skim_attention_mask = torch.zeros(skim_attention_output.shape, device=device) skim_attention_mask = skim_attention_mask.scatter(-1, topk_idx, 1)", "def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None): if input_ids is not None:", "extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0", "* -10000.0 if head_mask is not None: if head_mask.dim() ==", "# Prepare head mask if needed # 1.0 in head_mask", "base_model_prefix = \"skimformer\" def _init_weights(self, module): \"\"\" Initialize the weights", "self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.attention_head_size self.all_head_size = self.num_attention_heads *", "* -10000.0 encoder_outputs = self.encoder( text_embedding_output, skim_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states,", "= self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) else: text_embedding_output =", "= nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings,", "attention mask is (precomputed for all layers in BertModel forward()", "self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune):", "= context_layer.view(*new_context_layer_shape) return context_layer class SkimformerSelfOutput(nn.Module): def __init__(self, config): super().__init__()", "module): \"\"\" Initialize the 
weights \"\"\" if isinstance(module, (nn.Linear, nn.Embedding)):", "..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to", "= loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,)", "[batch_size, from_seq_length, to_seq_length] # ourselves in which case we just", "attention_probs_dropout_prob=config.attention_probs_dropout_prob, max_position_embeddings=config.max_2d_position_embeddings, initializer_range=config.initializer_range, layer_norm_eps=config.layer_norm_eps, gradient_checkpointing=config.gradient_checkpointing, ) ) self.skim_attention = SkimAttention(config)", "masked_lm_loss = None if labels is not None: loss_fct =", "x_center = (bbox[:, :, 0] + bbox[:, :, 2]) //", "self.config.num_hidden_layers) text_embedding_output = self.text_embeddings( input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) if self.use_1d_positions:", "in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None,", "= loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if", "the token classification loss. Indices should be in ``[0, ...,", "extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 if head_mask", "# seem a bit unusual, but is taken from the", "self.contextualize_2d_positions: spatial_pos_embedding_output = self.layout_encoder(hidden_states=spatial_pos_embedding_output)[0] skim_attention_output = self.skim_attention( spatial_pos_embedding_output, attention_mask=extended_attention_mask, )", "output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict", "return the attention probabilities only: Softmax(QK^T/sqrt(d)) return attention_probs class SkimformerSelfAttention(nn.Module):", "= nn.Linear(all_head_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob)", "= input_shape elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1]", "self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states,", "from the next sentence prediction (classification) objective during pretraining. hidden_states", "cross_attentions=encoder_outputs.cross_attentions, ) class SkimmingMaskModel(SkimmingMaskPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config", "> 0: num_attention_heads = attention_probs.shape[1] indices = [idx for idx", "dim=-1)).view(-1) > 0 else: active_loss = attention_mask.view(-1) == 1 active_logits", "gradient_checkpointing=config.gradient_checkpointing, ) ) self.skim_attention = SkimAttention(config) self.encoder = SkimformerEncoder(config) self.pooler", "head_mask, ) attention_output = self.output(self_output, hidden_states) return attention_output class SkimformerLayer(nn.Module):", "pretrained models. 
\"\"\" config_class = SkimmingMaskConfig base_model_prefix = \"skimmingmask\" _keys_to_ignore_on_load_missing", "= encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None", "nn.Linear(config.hidden_layout_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer(\"position_ids\",", "layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) return layer_output", "is converted to shape [num_hidden_layers x batch x num_heads x", "# Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores)", "num_predict, hidden_size)`): Sequence of hidden-states at the last layer of", "should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)", "initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_()", "self.contextualize_2d_positions = config.contextualize_2d_positions if config.contextualize_2d_positions: self.layout_encoder = BertEncoder( BertConfig( hidden_size=config.hidden_layout_size,", "= config.num_attention_heads self.attention_head_size = config.skim_attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size", "0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set", "hidden_states, attention_probs, head_mask=None, ): if len(self.pruned_heads) > 0: num_attention_heads =", "token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions =", "= config self.core_model_type = config.core_model_type self.embeddings = BertEmbeddings(config) if self.core_model_type", "shape `[batch_size, sequence_length]`, but got shape: {}\".format(input_shape) if bbox is", "x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_layout_states,", "= nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.degrade_2d_positions = config.degrade_2d_positions", "= self.position_ids[:, :seq_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape,", "config.num_attention_heads self.attention_head_size = config.skim_attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.query", "not None else None if getattr(self.config, \"gradient_checkpointing\", False): def create_custom_forward(module):", "be within 0-1000 range.\") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :,", "indicate we keep the head # attention_probs has shape bsz", "attention_probs, layer_head_mask, ) hidden_states = layer_output if output_hidden_states: all_hidden_states =", "upper_position_embeddings = self.y_position_projection(upper_position_embeddings) right_position_embeddings = self.x_position_projection(right_position_embeddings) lower_position_embeddings = self.y_position_projection(lower_position_embeddings) h_position_embeddings", "def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None,", "hidden_states, attention_probs, layer_head_mask, ) else: 
layer_output = layer_module( hidden_states, attention_probs,", "= [r\"position_ids\"] def _init_weights(self, module): \"\"\" Initialize the weights \"\"\"", "input_ids and inputs_embeds at the same time\") elif input_ids is", "add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.text_embeddings.word_embeddings def set_input_embeddings(self,", "config.core_model_type self.embeddings = BertEmbeddings(config) if self.core_model_type == \"bert\" else LayoutLMEmbeddings(config)", ":, 3] - bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :,", "returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of", "self.dropout(position_embeddings) return position_embeddings class Skimformer2DPositionEmbeddings(nn.Module): \"\"\"Construct the layout embeddings from", "loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) +", "config.num_labels) self.init_weights() def get_input_embeddings(self): return self.bert_with_skim_embed.embeddings.word_embeddings def set_input_embeddings(self, value): self.bert_with_skim_embed.embeddings.word_embeddings", "all_hidden_states, ] if v is not None ) return SkimformerEncoderOutput(", "), \"`input_ids` has to be of shape `[batch_size, sequence_length]`, but", "forward(self, input_shape, device, position_ids=None): seq_length = input_shape[1] if position_ids is", "config.hidden_layout_size) self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\") self.LayerNorm = LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps)", "self.output = SkimformerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if", "self.init_weights() def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,", "for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, attention_probs, head_mask=None,", "sequence_length, hidden_size)`. 
# Skimformer model components (PyTorch / Hugging Face Transformers): text, 1D-position and 2D-layout
# embeddings, the layout-only skim attention, the Skimformer encoder/model, and the task heads and
# SkimmingMask / BertWithSkimEmbed variants built from the same blocks.

from collections import namedtuple
from dataclasses import dataclass
from typing import Optional, Tuple

import math
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, LayerNorm

from transformers.file_utils import ModelOutput
from transformers.modeling_utils import (
    PreTrainedModel,
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_linear_layer,
)
from transformers.modeling_outputs import (
    BaseModelOutputWithPoolingAndCrossAttentions,
    MaskedLMOutput,
    TokenClassifierOutput,
)
from transformers.models.bert.modeling_bert import (
    BertConfig,
    BertEmbeddings,
    BertEncoder,
    BertIntermediate,
    BertOnlyMLMHead,
    BertOutput,
    BertPooler,
)
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMEmbeddings

from .configuration_skim import (
    SkimformerConfig,
    BertWithSkimEmbedConfig,
    SkimmingMaskConfig,
)


SkimformerEncoderOutput = namedtuple(
    "SkimformerEncoderOutput",
    ["hidden_states", "all_hidden_states"],
)


class SkimformerTextEmbeddings(nn.Module):
    """Construct the text embeddings from word and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None):
        if input_ids is not None:
            input_shape = input_ids.size()
            device = input_ids.device
        else:
            input_shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class Skimformer1DPositionEmbeddings(nn.Module):
    """Construct the sequential position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_layout_size)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.LayerNorm = LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_shape, device, position_ids=None):
        seq_length = input_shape[1]
        if position_ids is None:
            position_ids = torch.arange(0, seq_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).expand(input_shape)
        position_embeddings = self.position_embeddings(position_ids)
        position_embeddings = self.LayerNorm(position_embeddings)
        position_embeddings = self.dropout(position_embeddings)
        return position_embeddings


class Skimformer2DPositionEmbeddings(nn.Module):
    """Construct the layout embeddings from the bounding box coordinates."""

    def __init__(self, config):
        super().__init__()
        self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)
        self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)
        self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)
        self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)
        self.degrade_2d_positions = config.degrade_2d_positions if hasattr(config, "degrade_2d_positions") else False
        self.LayerNorm = LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, bbox=None):
        if self.degrade_2d_positions:
            # Degraded mode: keep only the embeddings of the box centers.
            try:
                x_center = (bbox[:, :, 0] + bbox[:, :, 2]) // 2
                y_center = (bbox[:, :, 1] + bbox[:, :, 3]) // 2
                x_center_position_embeddings = self.x_position_embeddings(x_center)
                y_center_position_embeddings = self.y_position_embeddings(y_center)
            except IndexError as e:
                raise IndexError("The :obj:`bbox` coordinate values should be within 0-1000 range.") from e
            two_dim_pos_embeddings = x_center_position_embeddings + y_center_position_embeddings
        else:
            try:
                left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
                upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
                right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
                lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
            except IndexError as e:
                raise IndexError("The :obj:`bbox` coordinate values should be within 0-1000 range.") from e
            h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
            w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
            two_dim_pos_embeddings = (
                left_position_embeddings
                + upper_position_embeddings
                + right_position_embeddings
                + lower_position_embeddings
                + h_position_embeddings
                + w_position_embeddings
            )
        two_dim_pos_embeddings = self.LayerNorm(two_dim_pos_embeddings)
        two_dim_pos_embeddings = self.dropout(two_dim_pos_embeddings)
        return two_dim_pos_embeddings
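A quick way to exercise the layout embedding module above is on a toy bounding-box tensor. The snippet below is an illustrative sketch, not part of the source: the config object is a types.SimpleNamespace stand-in for the real config, and every size in it is made up.

import types

import torch

cfg = types.SimpleNamespace(
    max_2d_position_embeddings=1024,   # hypothetical values; only the attribute names come from the source
    hidden_layout_size=16,
    layer_norm_eps=1e-12,
    hidden_dropout_prob=0.0,
)
layout_embeddings = Skimformer2DPositionEmbeddings(cfg)

# One sequence of two boxes, coordinates already normalized to the 0-1000 range (x0, y0, x1, y1).
bbox = torch.tensor([[[10, 20, 110, 60], [120, 20, 300, 60]]])
out = layout_embeddings(bbox=bbox)
print(out.shape)  # torch.Size([1, 2, 16])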
class SkimAttention(nn.Module):
    """Compute the skim attention distribution from the layout embeddings only."""

    def __init__(self, config):
        super().__init__()
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = config.skim_attention_head_size
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_layout_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_layout_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_layout_states, attention_mask=None):
        key_layer = self.transpose_for_scores(self.key(hidden_layout_states))
        query_layer = self.transpose_for_scores(self.query(hidden_layout_states))

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the model's forward() function).
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Return the attention probabilities only: Softmax(QK^T/sqrt(d)).
        return attention_probs
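SkimAttention can be smoke-tested the same way; the stand-in config below only reuses attribute names that appear in the source (num_attention_heads, skim_attention_head_size, hidden_layout_size, attention_probs_dropout_prob), with made-up values.

import types

import torch

cfg = types.SimpleNamespace(
    num_attention_heads=2,
    skim_attention_head_size=8,
    hidden_layout_size=16,
    attention_probs_dropout_prob=0.0,
)
skim = SkimAttention(cfg)

layout_states = torch.rand(1, 5, 16)         # (batch, seq_len, hidden_layout_size)
probs = skim(layout_states)                  # (batch, num_heads, seq_len, seq_len)
print(probs.shape, probs[0, 0].sum(dim=-1))  # every row of the attention matrix sums to 1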
class SkimformerSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = config.attention_head_size
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_probs, head_mask=None):
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        # Mask heads if we want to: 1.0 in head_mask indicates we keep the head.
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        # Softmax(QK^T/sqrt(d)) . V, with the softmax term precomputed by SkimAttention.
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer


class SkimformerSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        all_head_size = config.num_attention_heads * config.attention_head_size
        self.dense = nn.Linear(all_head_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class SkimformerAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = SkimformerSelfAttention(config)
        self.output = SkimformerSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers.
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads.
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states, attention_probs, head_mask=None):
        if len(self.pruned_heads) > 0:
            # Drop the rows of pruned heads from the shared skim attention probabilities.
            num_attention_heads = attention_probs.shape[1]
            indices = torch.tensor(
                [idx for idx in range(num_attention_heads) if idx not in self.pruned_heads],
                device=attention_probs.device,
            )
            attention_probs = torch.index_select(attention_probs, 1, indices)
        self_output = self.self(hidden_states, attention_probs, head_mask)
        attention_output = self.output(self_output, hidden_states)
        return attention_output


class SkimformerLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = SkimformerAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_probs, head_mask=None):
        attention_output = self.attention(hidden_states, attention_probs, head_mask)
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        return layer_output

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class SkimformerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([SkimformerLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_probs, head_mask=None, output_hidden_states=False, return_dict=None):
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            if getattr(self.config, "gradient_checkpointing", False):

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                layer_output = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module), hidden_states, attention_probs, layer_head_mask
                )
            else:
                layer_output = layer_module(hidden_states, attention_probs, layer_head_mask)
            hidden_states = layer_output

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return SkimformerEncoderOutput(hidden_states=hidden_states, all_hidden_states=all_hidden_states)


class SkimformerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SkimformerConfig
    base_model_prefix = "skimformer"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


@dataclass
class SkimformerModelOutput(ModelOutput):
    """
    Output type of :class:`~SkimformerModel`.

    Args:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Hidden-states of the model at the output of the last layer.
        pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token) further processed by a
            Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
            prediction (classification) objective during pretraining.
        attentions (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`,
            `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads (the single skim attention distribution shared by all layers).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is
            passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each
            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the
            output of each layer plus the initial embedding outputs.
    """

    last_hidden_state: torch.FloatTensor = None
    pooler_output: torch.FloatTensor = None
    attentions: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None


class SkimformerModel(SkimformerPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.use_1d_positions = config.use_1d_positions

        self.text_embeddings = SkimformerTextEmbeddings(config)
        if self.use_1d_positions:
            self.one_dim_pos_embeddings = Skimformer1DPositionEmbeddings(config)
        else:
            self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config)
            self.contextualize_2d_positions = config.contextualize_2d_positions
            if self.contextualize_2d_positions:
                self.layout_encoder = BertEncoder(
                    BertConfig(
                        hidden_size=config.hidden_layout_size,
                        num_hidden_layers=config.num_hidden_layers_layout_encoder,
                        num_attention_heads=config.num_attention_heads_layout_encoder,
                        intermediate_size=config.intermediate_size,
                        hidden_act=config.hidden_act,
                        hidden_dropout_prob=config.hidden_dropout_prob,
                        attention_probs_dropout_prob=config.attention_probs_dropout_prob,
                        max_position_embeddings=config.max_2d_position_embeddings,
                        initializer_range=config.initializer_range,
                        layer_norm_eps=config.layer_norm_eps,
                        gradient_checkpointing=config.gradient_checkpointing,
                    )
                )

        self.skim_attention = SkimAttention(config)
        self.encoder = SkimformerEncoder(config)
        self.pooler = BertPooler(config) if add_pooling_layer else None

        self.init_weights()

    def get_input_embeddings(self):
        return self.text_embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.text_embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
        class PreTrainedModel.
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def forward(
        self,
        input_ids=None,
        bbox=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        if bbox is None:
            bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves, in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        text_embedding_output = self.text_embeddings(
            input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        if self.use_1d_positions:
            pos_embedding_output = self.one_dim_pos_embeddings(
                input_shape=input_shape, device=device, position_ids=position_ids
            )
        else:
            pos_embedding_output = self.two_dim_pos_embeddings(bbox=bbox)
            if self.contextualize_2d_positions:
                pos_embedding_output = self.layout_encoder(hidden_states=pos_embedding_output)[0]

        # The skim attention distribution is computed once, from the position/layout stream only,
        # and reused by every encoder layer.
        skim_attention_output = self.skim_attention(pos_embedding_output, attention_mask=extended_attention_mask)

        encoder_outputs = self.encoder(
            text_embedding_output,
            skim_attention_output,
            head_mask=head_mask,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            outputs = (sequence_output, pooled_output)
            if output_attentions:
                outputs = outputs + (skim_attention_output,)
            if output_hidden_states:
                outputs = outputs + encoder_outputs[1:]
            return outputs

        return SkimformerModelOutput(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            attentions=skim_attention_output if output_attentions else None,
            hidden_states=encoder_outputs.all_hidden_states,
        )


# Task heads and model variants built from the same blocks:
# - SkimformerForMaskedLM wraps SkimformerModel(config, add_pooling_layer=False) with a BertOnlyMLMHead;
#   get_output_embeddings()/set_output_embeddings() expose cls.predictions.decoder, and the masked-LM loss is
#   CrossEntropyLoss()(prediction_scores.view(-1, config.vocab_size), labels.view(-1)), where -100 is the padding
#   index: tokens with label -100 are ignored and the loss is only computed for labels in ``[0, ..., config.vocab_size]``.
# - SkimformerForTokenClassification (_keys_to_ignore_on_load_unexpected = [r"pooler"]) adds
#   nn.Dropout(config.hidden_dropout_prob) and nn.Linear(config.hidden_size, config.num_labels) on top of the
#   sequence output; labels are in ``[0, ..., config.num_labels - 1]`` and the loss is masked with attention_mask.
# - SkimmingMaskModel (config_class = SkimmingMaskConfig, base_model_prefix = "skimmingmask") uses BertEmbeddings or
#   LayoutLMEmbeddings depending on config.core_model_type, computes skim attention over the (optionally
#   layout-encoder-contextualized) 2D position embeddings, keeps the top_k keys per query
#   (torch.topk(...).indices scattered into a 0/1 mask, multiplied by the attention mask and turned into an additive
#   -10000.0 mask), and feeds that mask to a standard BertEncoder; SkimmingMaskForTokenClassification adds the usual
#   classification head.
# - BertWithSkimEmbedModel (config_class = BertWithSkimEmbedConfig, base_model_prefix = "bertwithskimembed") uses
#   BertWithSkimEmbedEmbeddings, which sums word, position, token_type and 2D position embeddings after projecting
#   the x/y/h/w layout embeddings from hidden_layout_size to hidden_size, before a standard BertEncoder and
#   BertPooler; BertWithSkimEmbedForTokenClassification adds the usual classification head.
pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`): Last", "self.skim_attention = SkimAttention(config) self.encoder = SkimformerEncoder(config) self.pooler = BertPooler(config) if", "is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape", "class BertWithSkimEmbedForTokenClassification(BertWithSkimEmbedPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"] def __init__(self, config): super().__init__(config) self.num_labels", "the head # attention_probs has shape bsz x n_heads x", "loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class SkimmingMaskForTokenClassification(SkimmingMaskPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"]", "is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for", "topk_idx, 1) skim_attention_mask = skim_attention_mask * attention_mask[:, None, :, :]", "same dimension as text embeddings left_position_embeddings = self.x_position_projection(left_position_embeddings) upper_position_embeddings =", "( words_embeddings + position_embeddings + two_dim_pos_embeddings + token_type_embeddings ) embeddings", "self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states)", "encoder_outputs = self.encoder( text_embedding_output, skim_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )", "= self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings", "and a simple interface for downloading and loading pretrained models.", "within 0-1000 range.\") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3]", "self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) else: text_embedding_output = self.embeddings(", "= return_dict if return_dict is not None else self.config.use_return_dict outputs", "of the embeddings and one for the output of each", "SkimmingMaskPreTrainedModel(PreTrainedModel): \"\"\" An abstract class to handle weights initialization and", "output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if", "if self.use_1d_positions: pos_embedding_output = self.one_dim_pos_embeddings( input_shape=input_shape, device=device, position_ids=position_ids, ) else:", "= all_hidden_states + (hidden_states,) if not return_dict: return tuple( v", "raise IndexError(\"The :obj:`bbox` coordinate values should be within 0-1000 range.\")", "= \"bertwithskimembed\" _keys_to_ignore_on_load_missing = [r\"position_ids\"] def _init_weights(self, module): \"\"\" Initialize", "active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits,", "+ (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class SkimformerSelfOutput(nn.Module): def", "None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )", "attention_probs * head_mask # Softmax(QK^T/sqrt(d)) . 
V context_layer = torch.matmul(attention_probs,", "heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.value", "else None self.init_weights() def get_input_embeddings(self): return self.text_embeddings.word_embeddings def set_input_embeddings(self, value):", "value_layer = self.transpose_for_scores(self.value(hidden_states)) # Mask heads if we want to", "Linear layer weights are trained from the next sentence prediction", ") return SkimformerEncoderOutput( hidden_states=hidden_states, all_hidden_states=all_hidden_states, ) class SkimformerPreTrainedModel(PreTrainedModel): \"\"\" An", "= BertWithSkimEmbedModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels)", "None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if inputs_embeds is None:", "set_input_embeddings(self, value): self.bert_with_skim_embed.embeddings.word_embeddings = value def forward( self, input_ids=None, bbox=None,", "input_shape = inputs_embeds.size()[:-1] device = inputs_embeds.device device = input_ids.device if", "= config.num_labels self.bert_with_skim_embed = BertWithSkimEmbedModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier", "-1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)", "import ( BertConfig, BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead, )", "SkimformerModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder", "return context_layer class SkimformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() all_head_size =", "probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out", "( BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput, ) from transformers.models.bert.modeling_bert import ( BertConfig,", "else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not", "self.encoder( embedding_output, extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output =", "just need to make it broadcastable to all heads. 
extended_attention_mask:", "self.attention( hidden_states, attention_probs, head_mask, ) layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward,", "): return_dict = return_dict if return_dict is not None else", "token_type_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size()", "layer weights are trained from the next sentence prediction (classification)", "x_center_position_embeddings + y_center_position_embeddings else: try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])", "# -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size),", "= find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear", "self.output(self_output, hidden_states) return attention_output class SkimformerLayer(nn.Module): def __init__(self, config): super().__init__()", "attention_output = self.output(self_output, hidden_states) return attention_output class SkimformerLayer(nn.Module): def __init__(self,", "forward( self, hidden_states, attention_probs, head_mask=None, ): attention_output = self.attention( hidden_states,", "at the same time\") elif input_ids is not None: input_shape", "= self.two_dim_pos_embeddings(bbox=bbox) if self.contextualize_2d_positions: spatial_pos_embedding_output = self.layout_encoder(hidden_states=spatial_pos_embedding_output)[0] skim_attention_output = self.skim_attention(", "y_center_position_embeddings else: try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings =", "is not None: # Apply the attention mask is (precomputed", "= self.cls(sequence_output) masked_lm_loss = None if labels is not None:", "self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of", "= self.h_position_projection(h_position_embeddings) w_position_embeddings = self.w_position_projection(w_position_embeddings) two_dim_pos_embeddings = ( left_position_embeddings +", "= Skimformer1DPositionEmbeddings(config) else: self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions = config.contextualize_2d_positions if", "attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SkimmingMaskModel(SkimmingMaskPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config)", "of the loss if attention_mask is not None: active_loss =", "sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss", "not None else self.config.use_return_dict outputs = self.bert_with_skim_embed( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask,", "output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. 
Hidden-states", "active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits,", "inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r\"\"\" labels (:obj:`torch.LongTensor` of", "# attention_probs has shape bsz x n_heads x N x", "position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r\"\"\" labels", "lower_position_embeddings + h_position_embeddings + w_position_embeddings ) embeddings = self.LayerNorm(embeddings) embeddings", "-1, -1, -1, -1) elif head_mask.dim() == 2: head_mask =", "nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)", "\"\"\" config_class = SkimmingMaskConfig base_model_prefix = \"skimmingmask\" _keys_to_ignore_on_load_missing = [r\"position_ids\"]", "== 3: active_loss = (torch.sum(attention_mask, dim=-1)).view(-1) > 0 else: active_loss", "and module.bias is not None: module.bias.data.zero_() class SkimmingMaskPreTrainedModel(PreTrainedModel): \"\"\" An", "0-1000 range.\") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] -", "hidden_states, attention_probs, head_mask=None, ): attention_output = self.attention( hidden_states, attention_probs, head_mask,", "pooler_output=pooled_output, attentions=skim_attention_output if output_attentions else None, hidden_states=encoder_outputs.all_hidden_states, ) class BertWithSkimEmbedModel(BertWithSkimEmbedPreTrainedModel):", "from .configuration_skim import ( SkimformerConfig, BertWithSkimEmbedConfig, SkimmingMaskConfig, ) logger =", "Skimformer1DPositionEmbeddings(config) else: self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions = config.contextualize_2d_positions if self.contextualize_2d_positions:", "input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if", "(one for the output of the embeddings and one for", "return tuple( v for v in [ hidden_states, all_hidden_states, ]", "= BertPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return", "heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None,", "which case we just need to make it broadcastable to", "= config.core_model_type self.embeddings = BertEmbeddings(config) if self.core_model_type == \"bert\" else", "__init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.use_1d_positions = config.use_1d_positions", "None hidden_states: Optional[Tuple[torch.FloatTensor]] = None class SkimformerModel(SkimformerPreTrainedModel): def __init__(self, config,", "inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if input_ids is", "The Linear layer weights are trained from the next sentence", "pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SkimformerForMaskedLM(SkimformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"]", 
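# How the pieces above fit together in SkimformerModel (summary of the forward pass below):
#   1. SkimformerTextEmbeddings turns input_ids/token_type_ids into text hidden states.
#   2. Skimformer1DPositionEmbeddings or Skimformer2DPositionEmbeddings turn token positions or
#      bounding boxes into layout embeddings (optionally contextualized by a small BertEncoder
#      that runs over the layout embeddings only).
#   3. SkimAttention computes attention probabilities from the layout embeddings alone.
#   4. SkimformerEncoder reuses those same probabilities in every SkimformerLayer, applying them
#      to the text states through a value projection only (no per-layer query/key projections).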
"nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def get_input_embeddings(self): return self.skimming_mask_model.embeddings.word_embeddings", "return x.permute(0, 2, 1, 3) def forward( self, hidden_layout_states, attention_mask=None,", "= nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.x_position_projection = nn.Linear(config.hidden_layout_size,", "= self.classifier(sequence_output) loss = None if labels is not None:", "None if labels is not None: loss_fct = CrossEntropyLoss() if", "coordinate values should be within 0-1000 range.\") from e embeddings", ":obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of", "shape: {}\".format(input_shape) if bbox is not None: bbox_shape = bbox.size()", "1, 3) def forward( self, hidden_states, attention_probs, head_mask=None, ): value_layer", "loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss", "else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not", "BertWithSkimEmbedConfig, SkimmingMaskConfig, ) logger = logging.getLogger(__name__) SkimformerEncoderOutput = namedtuple( \"SkimformerEncoderOutput\",", "None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is", "nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def get_input_embeddings(self): return self.skimming_mask_model.embeddings.word_embeddings def set_input_embeddings(self, value):", "output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if", "dataclasses import dataclass from typing import Optional, Tuple import math", "word and token_type embeddings.\"\"\" def __init__(self, config): super().__init__() self.max_position_embeddings =", "dtype=torch.long, device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings", "class SkimmingMaskModel(SkimmingMaskPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config", "raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the", "be of shape `[batch_size, sequence_length, 4]`, but got shape: {}\".format(bbox_shape)", "= self.LayerNorm(hidden_states + input_tensor) return hidden_states class SkimformerAttention(nn.Module): def __init__(self,", "attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: token_type_ids", "attentions=outputs.attentions, ) class SkimformerForTokenClassification(SkimformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"] def __init__(self, config):", "= (1.0 - extended_attention_mask) * -10000.0 if head_mask is not", "import logging from dataclasses import dataclass from typing import Optional,", "Output type of :class:`~SkimformerModel`. Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size,", "Softmax(QK^T/sqrt(d)) return attention_probs class SkimformerSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads", "- 1]``. \"\"\" return_dict = return_dict if return_dict is not", "`optional`): Labels for computing the masked language modeling loss. 
Indices", "= config self.use_1d_positions = config.use_1d_positions self.text_embeddings = SkimformerTextEmbeddings(config) if self.use_1d_positions:", "input_shape, device) # Prepare head mask if needed # 1.0", "head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if self.core_model_type == \"bert\": text_embedding_output =", "self.config = config self.layer = nn.ModuleList([SkimformerLayer(config) for _ in range(config.num_hidden_layers)])", "forward( self, hidden_layout_states, attention_mask=None, ): key_layer = self.transpose_for_scores(self.key(hidden_layout_states)) query_layer =", "and one for the output of each layer) of shape", "layer_output if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not", ":obj:`(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last layer", "None: attention_probs = attention_probs * head_mask # Softmax(QK^T/sqrt(d)) . V", "attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where(", "def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward layer_output =", "to specify either input_ids or inputs_embeds\") assert ( len(input_shape) ==", "layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned", "are ignored (masked), the loss is only computed for the", "self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm", "attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids", "+ lower_position_embeddings + h_position_embeddings + w_position_embeddings ) embeddings = self.LayerNorm(embeddings)", "= nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def get_input_embeddings(self): return self.skimming_mask_model.embeddings.word_embeddings def set_input_embeddings(self,", "+ two_dim_pos_embeddings + token_type_embeddings ) embeddings = self.LayerNorm(embeddings) embeddings =", "elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length", "(self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class SkimformerSelfOutput(nn.Module): def __init__(self,", "self.skimformer = SkimformerModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self):", "input_shape = input_ids.size() batch_size, seq_length = input_shape elif inputs_embeds is", "coordinates.\"\"\" def __init__(self, config): super().__init__() self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.y_position_embeddings", "= head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) elif", "config.vocab_size]`` \"\"\" return_dict = return_dict if return_dict is not None", "def get_input_embeddings(self): return self.bert_with_skim_embed.embeddings.word_embeddings def set_input_embeddings(self, value): self.bert_with_skim_embed.embeddings.word_embeddings = value", "self.output(intermediate_output, attention_output) return layer_output class SkimformerEncoder(nn.Module): def __init__(self, config): super().__init__()", "def forward(self, hidden_states, input_tensor): hidden_states = 
self.dense(hidden_states) hidden_states = self.dropout(hidden_states)", "PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward(", "x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)", "torch.arange( 0, seq_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).expand(input_shape) position_embeddings", "None self.init_weights() def get_input_embeddings(self): return self.text_embeddings.word_embeddings def set_input_embeddings(self, value): self.text_embeddings.word_embeddings", "return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self,", "attention_mask[:, None, :, :] skim_attention_mask = (1.0 - skim_attention_mask) *", "apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) return layer_output def feed_forward_chunk(self,", "each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the", "get_input_embeddings(self): return self.skimming_mask_model.embeddings.word_embeddings def set_input_embeddings(self, value): self.skimming_mask_model.embeddings.word_embeddings = value def", "\"absolute\") def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)", "first token of the sequence (classification token) further processed by", "+ right_position_embeddings + lower_position_embeddings + h_position_embeddings + w_position_embeddings ) token_type_embeddings", "a simple interface for downloading and loading pretrained models. \"\"\"", "def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x", "hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states", "config.num_labels) self.init_weights() def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None,", "of each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`,", "Sequence of hidden-states at the last layer of the model.", "= self.attention( hidden_states, attention_probs, head_mask, ) layer_output = apply_chunking_to_forward( self.feed_forward_chunk,", "= loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,)", "active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict:", "self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward( self, input_ids=None, bbox=None, token_type_ids=None, position_ids=None,", "input_ids or inputs_embeds\") device = input_ids.device if input_ids is not", ") encoder_outputs = self.encoder( embedding_output, extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict,", "= position_ids.unsqueeze(0).expand(input_shape) position_embeddings = self.position_embeddings(position_ids) position_embeddings = self.LayerNorm(position_embeddings) position_embeddings =", "None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings", "apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.modeling_outputs import ( BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput,", "is not None: module.bias.data.zero_() class SkimmingMaskPreTrainedModel(PreTrainedModel): \"\"\" An abstract class", "def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.core_model_type =", "= nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def get_input_embeddings(self): return", "linear layers self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index,", "config.hidden_size) self.w_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout", "hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when", "active parts of the loss if attention_mask is not None:", "get_input_embeddings(self): return self.text_embeddings.word_embeddings def set_input_embeddings(self, value): self.text_embeddings.word_embeddings = value def", "self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.value =", "i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states +", "masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output =", "from transformers.modeling_outputs import ( BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput, ) from transformers.models.bert.modeling_bert", "self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward(", "heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size *", "make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask,", "new_embeddings def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,", "logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class SkimmingMaskForTokenClassification(SkimmingMaskPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"] def", "not None: # Apply the attention mask is (precomputed for", "= self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) spatial_pos_embedding_output =", "{}\".format(bbox_shape) device = input_ids.device if input_ids is not None else", "SkimAttention(config) self.encoder = SkimformerEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else", "config.skim_attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_layout_size, self.all_head_size)", "nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not", "self.attention_head_size = config.attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.value =", "if we want to if head_mask is not None: attention_probs", "= attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels =", "embeddings class Skimformer1DPositionEmbeddings(nn.Module): \"\"\"Construct sequential position embeddings.\"\"\" def __init__(self, config):", ":, 3]) // 2 x_center_position_embeddings = self.x_position_embeddings(x_center) y_center_position_embeddings = self.y_position_embeddings(y_center)", "\"\"\" last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor = None attentions:", "elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.to(dtype=next(self.parameters()).dtype)", "SkimformerModel(SkimformerPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.use_1d_positions", "dimension as text embeddings left_position_embeddings = self.x_position_projection(left_position_embeddings) upper_position_embeddings = self.y_position_projection(upper_position_embeddings)", "dataclass from typing import Optional, Tuple import math import torch", "= nn.Dropout(config.hidden_dropout_prob) self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward( self, input_ids=None, bbox=None,", "( left_position_embeddings + upper_position_embeddings + right_position_embeddings + lower_position_embeddings + h_position_embeddings", ") layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) return", "self.contextualize_2d_positions: pos_embedding_output = self.layout_encoder( hidden_states=pos_embedding_output, )[0] skim_attention_output = self.skim_attention( pos_embedding_output,", "eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_shape, device, position_ids=None): seq_length", "attention_probs, head_mask=None, ): if len(self.pruned_heads) > 0: num_attention_heads = attention_probs.shape[1]", "self.all_head_size = self.num_attention_heads * self.attention_head_size self.value = nn.Linear(config.hidden_size, 
self.all_head_size) def", "self.pooler is not None else None if not return_dict: return", "x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if self.core_model_type", "self.two_dim_pos_embeddings( bbox=bbox, ) if self.contextualize_2d_positions: pos_embedding_output = self.layout_encoder( hidden_states=pos_embedding_output, )[0]", "further processed by a Linear layer and a Tanh activation", "position_ids = position_ids.unsqueeze(0).expand(input_shape) position_embeddings = self.position_embeddings(position_ids) position_embeddings = self.LayerNorm(position_embeddings) position_embeddings", "\"\"\" Prunes heads of the model. heads_to_prune: dict of {layer_num:", "= namedtuple( \"SkimformerEncoderOutput\", [\"hidden_states\", \"all_hidden_states\"], ) class SkimformerTextEmbeddings(nn.Module): \"\"\"Construct the", "return_dict is not None else self.config.use_return_dict outputs = self.skimformer( input_ids,", "in BertModel forward() function) attention_scores = attention_scores + attention_mask #", "pretraining. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or", "is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)", "w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) #", "if len(self.pruned_heads) > 0: num_attention_heads = attention_probs.shape[1] indices = [idx", "attention_mask=extended_attention_mask, ) topk_idx = torch.topk(skim_attention_output, self.top_k, -1).indices skim_attention_mask = torch.zeros(skim_attention_output.shape,", "hidden_size=config.hidden_layout_size, num_hidden_layers=config.num_hidden_layers_layout_encoder, num_attention_heads=config.num_attention_heads_layout_encoder, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, hidden_dropout_prob=config.hidden_dropout_prob, attention_probs_dropout_prob=config.attention_probs_dropout_prob, max_position_embeddings=config.max_2d_position_embeddings, initializer_range=config.initializer_range, layer_norm_eps=config.layer_norm_eps,", "self.h_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.w_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.LayerNorm =", "ignored (masked), the loss is only computed for the tokens", "should be in ``[0, ..., config.num_labels - 1]``. \"\"\" return_dict", "( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states )", "else None if not return_dict: outputs = (sequence_output, pooled_output) if", "model. ``num_predict`` corresponds to ``target_mapping.shape[1]``. 
If ``target_mapping`` is ``None``, then", "= self.transpose_for_scores(self.value(hidden_states)) # Mask heads if we want to if", "config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = SkimformerAttention(config) self.intermediate = BertIntermediate(config)", "[r\"pooler\"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.skimming_mask_model =", "seq_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).expand(input_shape) position_embeddings = self.position_embeddings(position_ids)", "dict of {layer_num: list of heads to prune in this", "= nn.Embedding(config.type_vocab_size, config.hidden_size) self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings,", "the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores", "self.query = nn.Linear(config.hidden_layout_size, self.all_head_size) self.key = nn.Linear(config.hidden_layout_size, self.all_head_size) self.dropout =", "TokenClassifierOutput, ) from transformers.models.bert.modeling_bert import ( BertConfig, BertEmbeddings, BertIntermediate, BertOutput,", "= self.dropout(embeddings) return embeddings class BertWithSkimEmbedEmbeddings(nn.Module): \"\"\"Construct the embeddings from", "sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if", "self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(", "else: self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions = config.contextualize_2d_positions if self.contextualize_2d_positions: self.layout_encoder", "value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads", "SkimformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer =", "class SkimformerForMaskedLM(SkimformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"] _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"] def", "nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.degrade_2d_positions = config.degrade_2d_positions if hasattr(config, \"degrade_2d_positions\") else False", "+ h_position_embeddings + w_position_embeddings ) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings =", "is actually dropping out entire tokens to attend to, which", "at the last layer of the model. ``num_predict`` corresponds to", "\"query\" and \"key\" to get the raw attention scores. 
attention_scores", "BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder", "\"\"\"Construct the text embeddings from word and token_type embeddings.\"\"\" def", "if self.contextualize_2d_positions: self.layout_encoder = BertEncoder( BertConfig( hidden_size=config.hidden_layout_size, num_hidden_layers=config.num_hidden_layers_layout_encoder, num_attention_heads=config.num_attention_heads_layout_encoder, intermediate_size=config.intermediate_size,", ") encoder_outputs = self.encoder( text_embedding_output, skim_attention_output, head_mask=head_mask, output_hidden_states=output_hidden_states, return_dict=return_dict, )", "None class SkimformerModel(SkimformerPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config =", "dtype=torch.long, device=device) if bbox is None: bbox = torch.zeros(tuple(list(input_shape) +", "is only computed for the tokens with labels in ``[0,", "= nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens", "import namedtuple import logging from dataclasses import dataclass from typing", "w_position_embeddings ) embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings", "key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is", "self.config = config self.use_1d_positions = config.use_1d_positions self.text_embeddings = SkimformerTextEmbeddings(config) if", "skim_attention_mask * attention_mask[:, None, :, :] skim_attention_mask = (1.0 -", "batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask,", "nn.Linear(config.hidden_layout_size, config.hidden_size) self.h_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.w_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size)", "+ bbox[:, :, 3]) // 2 x_center_position_embeddings = self.x_position_embeddings(x_center) y_center_position_embeddings", "= nn.Linear(config.hidden_layout_size, config.hidden_size) self.y_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.h_position_projection = nn.Linear(config.hidden_layout_size,", "set_input_embeddings(self, value): self.text_embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): \"\"\" Prunes", "Tokens with indices set to ``-100`` are ignored (masked), the", "): if len(self.pruned_heads) > 0: num_attention_heads = attention_probs.shape[1] indices =", "is not None else self.config.use_return_dict outputs = self.skimformer( input_ids, bbox,", "self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_layout_size) self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\") self.LayerNorm", "SkimformerAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward( self,", "feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return", "context_layer.view(*new_context_layer_shape) return context_layer class SkimformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() all_head_size", "def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings def forward( self, 
input_ids=None,", "else: layer_output = layer_module( hidden_states, attention_probs, layer_head_mask, ) hidden_states =", "attention_output ) return layer_output def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output)", "r\"\"\" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for", "= SkimformerAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward(", "keep active parts of the loss if attention_mask is not", "3) def forward( self, hidden_layout_states, attention_mask=None, ): key_layer = self.transpose_for_scores(self.key(hidden_layout_states))", "downloading and loading pretrained models. \"\"\" config_class = SkimmingMaskConfig base_model_prefix", "= self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self,", "skim_attention_mask = torch.zeros(skim_attention_output.shape, device=device) skim_attention_mask = skim_attention_mask.scatter(-1, topk_idx, 1) skim_attention_mask", "= self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_layout_size, self.all_head_size) self.key =", "if idx not in self.pruned_heads] attention_probs = torch.index_select(attention_probs, 1, indices)", "\"`bbox` has to be of shape `[batch_size, sequence_length, 4]`, but", "def forward(self, input_shape, device, position_ids=None): seq_length = input_shape[1] if position_ids", "2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer =", "self.w_position_projection(w_position_embeddings) two_dim_pos_embeddings = ( left_position_embeddings + upper_position_embeddings + right_position_embeddings +", "self.LayerNorm = LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_shape,", "the embeddings and one for the output of each layer)", ":obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss.", "= torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape,", "device = input_ids.device if input_ids is not None else inputs_embeds.device", "if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,)", "1] + bbox[:, :, 3]) // 2 x_center_position_embeddings = self.x_position_embeddings(x_center)", "+ h_position_embeddings + w_position_embeddings ) embeddings = self.LayerNorm(embeddings) embeddings =", "Function from transformers.file_utils import ( ModelOutput, ) from transformers.modeling_utils import", "-1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) head_mask =", "super().__init__(config) self.config = config self.core_model_type = config.core_model_type self.embeddings = BertEmbeddings(config)", "else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value):", "inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings", "= torch.arange( 0, seq_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).expand(input_shape)", "not None: module.bias.data.zero_() @dataclass class SkimformerModelOutput(ModelOutput): \"\"\" Output type of", ") if self.use_1d_positions: 
pos_embedding_output = self.one_dim_pos_embeddings( input_shape=input_shape, device=device, position_ids=position_ids, )", "import CrossEntropyLoss, LayerNorm from torch.autograd.function import Function from transformers.file_utils import", "else LayoutLMEmbeddings(config) self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions = config.contextualize_2d_positions if config.contextualize_2d_positions:", "passed or when ``config.output_attentions=True``): :obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, sequence_length,", "= attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: #", "self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) embeddings = (", "not None: input_shape = input_ids.size() elif inputs_embeds is not None:", "h_position_embeddings + w_position_embeddings ) embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings)", "of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of", "is None: bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device) #", "None pooler_output: torch.FloatTensor = None attentions: Optional[torch.FloatTensor] = None hidden_states:", "labels is not None: loss_fct = CrossEntropyLoss() # Only keep", "y_center = (bbox[:, :, 1] + bbox[:, :, 3]) //", "+ w_position_embeddings ) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = ( words_embeddings", "\"SkimformerEncoderOutput\", [\"hidden_states\", \"all_hidden_states\"], ) class SkimformerTextEmbeddings(nn.Module): \"\"\"Construct the text embeddings", "LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_shape, device, position_ids=None):", "embeddings from word and token_type embeddings.\"\"\" def __init__(self, config): super().__init__()", "Last layer hidden-state of the first token of the sequence", "create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward layer_output = torch.utils.checkpoint.checkpoint(", "is ``None``, then ``num_predict`` corresponds to ``sequence_length``. pooler_output (:obj:`torch.FloatTensor` of", "self.skim_attention = SkimAttention(config) self.top_k = config.top_k self.encoder = BertEncoder(config) self.pooler", "2]) // 2 y_center = (bbox[:, :, 1] + bbox[:,", "the text embeddings from word and token_type embeddings.\"\"\" def __init__(self,", "SkimmingMaskForTokenClassification(SkimmingMaskPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"] def __init__(self, config): super().__init__(config) self.num_labels =", "head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1,", "of {layer_num: list of heads to prune in this layer}", "hidden_states, attention_probs, head_mask=None, output_hidden_states=False, return_dict=None, ): all_hidden_states = () if", "``num_predict`` corresponds to ``sequence_length``. 
pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):", "if output_attentions else None, hidden_states=encoder_outputs.all_hidden_states, ) class BertWithSkimEmbedModel(BertWithSkimEmbedPreTrainedModel): def __init__(self,", "masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores,", "class BertWithSkimEmbedModel(BertWithSkimEmbedPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config", "self.num_labels = config.num_labels self.skimming_mask_model = SkimmingMaskModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob)", "return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SkimmingMaskModel(SkimmingMaskPreTrainedModel):", ":obj:`torch.FloatTensor` (one for the output of the embeddings and one", "head_mask # Softmax(QK^T/sqrt(d)) . V context_layer = torch.matmul(attention_probs, value_layer) context_layer", "is not None: module.bias.data.zero_() class BertWithSkimEmbedPreTrainedModel(PreTrainedModel): \"\"\" An abstract class", "not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states", "self.dropout(embeddings) return embeddings class BertWithSkimEmbedEmbeddings(nn.Module): \"\"\"Construct the embeddings from word,", "TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class BertWithSkimEmbedForTokenClassification(BertWithSkimEmbedPreTrainedModel): _keys_to_ignore_on_load_unexpected =", "x N x N # input head_mask has shape [num_heads]", "embeddings = ( words_embeddings + position_embeddings + two_dim_pos_embeddings + token_type_embeddings", "+ encoder_outputs[1:] return outputs return SkimformerModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, attentions=skim_attention_output if", "input_ids is not None else inputs_embeds.device if position_ids is None:", "device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings =", "hidden_states, all_hidden_states, ] if v is not None ) return", "specify either input_ids or inputs_embeds\") device = input_ids.device if input_ids", "position_ids = torch.arange( 0, seq_length, dtype=torch.long, device=device ) position_ids =", "super().__init__() self.config = config self.layer = nn.ModuleList([SkimformerLayer(config) for _ in", "config.degrade_2d_positions if hasattr(config, \"degrade_2d_positions\") else False self.LayerNorm = LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps)", "in the self-attention heads. \"\"\" last_hidden_state: torch.FloatTensor = None pooler_output:", "the weights \"\"\" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different", "self.text_embeddings = SkimformerTextEmbeddings(config) if self.use_1d_positions: self.one_dim_pos_embeddings = Skimformer1DPositionEmbeddings(config) else: self.two_dim_pos_embeddings", "..., config.num_labels - 1]``. \"\"\" return_dict = return_dict if return_dict", "``[0, ..., config.num_labels - 1]``. 
\"\"\" return_dict = return_dict if", "nn.Dropout(config.hidden_dropout_prob) self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward( self, input_ids=None, bbox=None, token_type_ids=None,", "config): super().__init__() self.max_position_embeddings = config.max_position_embeddings self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)", "enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask =", "= nn.Linear(config.hidden_layout_size, config.hidden_size) self.h_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.w_position_projection = nn.Linear(config.hidden_layout_size,", "__init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention", "= layer_module( hidden_states, attention_probs, layer_head_mask, ) hidden_states = layer_output if", "extended_attention_mask) * -10000.0 if head_mask is not None: if head_mask.dim()", "as e: raise IndexError(\"The :obj:`bbox` coordinate values should be within", "input_ids=None, bbox=None, token_type_ids=None, position_ids=None, inputs_embeds=None, ): if input_ids is not", "self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None,", "\"\"\" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the", "layout embeddings from the bounding box coordinates.\"\"\" def __init__(self, config):", ":, 0]) embeddings = ( left_position_embeddings + upper_position_embeddings + right_position_embeddings", "seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if self.core_model_type ==", "is not None ) return SkimformerEncoderOutput( hidden_states=hidden_states, all_hidden_states=all_hidden_states, ) class", "import torch from torch import nn from torch.nn import CrossEntropyLoss,", "nn.Linear) and module.bias is not None: module.bias.data.zero_() class BertWithSkimEmbedPreTrainedModel(PreTrainedModel): \"\"\"", "from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :,", "return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler", "class SkimformerSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size", "downloading and loading pretrained models. 
\"\"\" config_class = BertWithSkimEmbedConfig base_model_prefix", "batch_size, seq_length = input_shape elif inputs_embeds is not None: input_shape", "= input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1]", "hidden_states=pos_embedding_output, )[0] skim_attention_output = self.skim_attention( pos_embedding_output, attention_mask=extended_attention_mask, ) encoder_outputs =", "bbox=bbox, ) if self.contextualize_2d_positions: pos_embedding_output = self.layout_encoder( hidden_states=pos_embedding_output, )[0] skim_attention_output", "\"\"\"Construct sequential position embeddings.\"\"\" def __init__(self, config): super().__init__() self.position_embeddings =", "3]) except IndexError as e: raise IndexError(\"The :obj:`bbox`coordinate values should", "input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask", "processed by a Linear layer and a Tanh activation function.", "ValueError(\"You cannot specify both input_ids and inputs_embeds at the same", "the attention probabilities only: Softmax(QK^T/sqrt(d)) return attention_probs class SkimformerSelfAttention(nn.Module): def", "self, hidden_states, attention_probs, head_mask=None, ): value_layer = self.transpose_for_scores(self.value(hidden_states)) # Mask", "attention_probs = torch.index_select(attention_probs, 1, indices) self_output = self.self( hidden_states, attention_probs,", "= context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)", "output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict", "[num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted", "= [r\"pooler\"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert_with_skim_embed", "# Slightly different from the TF version which uses truncated_normal", "self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads =", "embeddings from the bounding box coordinates.\"\"\" def __init__(self, config): super().__init__()", "__init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.core_model_type = config.core_model_type", "else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output", "padding_idx=config.pad_token_id) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout", "transformers.file_utils import ( ModelOutput, ) from transformers.modeling_utils import ( PreTrainedModel,", "= self.get_head_mask(head_mask, self.config.num_hidden_layers) if self.core_model_type == \"bert\": text_embedding_output = self.embeddings(", "or inputs_embeds\") device = input_ids.device if input_ids is not None", "_keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"] def __init__(self, config): super().__init__(config) self.skimformer =", "Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) #", "of :class:`~SkimformerModel`. 
Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`):", "= (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss", "inputs_embeds.size()[:-1] device = inputs_embeds.device device = input_ids.device if input_ids is", "add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def", "the same time\") elif input_ids is not None: input_shape =", "return self.text_embeddings.word_embeddings def set_input_embeddings(self, value): self.text_embeddings.word_embeddings = value def _prune_heads(self,", "layer} See base class PreTrainedModel \"\"\" for layer, heads in", "loss if attention_mask is not None: active_loss = attention_mask.view(-1) ==", "config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)", ") # Prune linear layers self.self.value = prune_linear_layer(self.self.value, index) self.output.dense", "the dot product between \"query\" and \"key\" to get the", "len(self.pruned_heads) > 0: num_attention_heads = attention_probs.shape[1] indices = [idx for", "IndexError as e: raise IndexError(\"The :obj:`bbox` coordinate values should be", "attention_scores = attention_scores + attention_mask # Normalize the attention scores", "and inputs_embeds is not None: raise ValueError(\"You cannot specify both", ".configuration_skim import ( SkimformerConfig, BertWithSkimEmbedConfig, SkimmingMaskConfig, ) logger = logging.getLogger(__name__)", "padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict:", "\"all_hidden_states\"], ) class SkimformerTextEmbeddings(nn.Module): \"\"\"Construct the text embeddings from word", "def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings", "output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not", "config.num_attention_heads self.attention_head_size = config.attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.value", "self.word_embeddings(input_ids) words_embeddings = inputs_embeds position_embeddings = self.position_embeddings(position_ids) try: left_position_embeddings =", "= [idx for idx in range(num_attention_heads) if idx not in", "1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1),", "for v in [ hidden_states, all_hidden_states, ] if v is", "is not None: active_loss = attention_mask.view(-1) == 1 active_logits =", "active_loss = (torch.sum(attention_mask, dim=-1)).view(-1) > 0 else: active_loss = attention_mask.view(-1)", "inputs_embeds\") device = input_ids.device if input_ids is not None else", "not None else None if not return_dict: outputs = (sequence_output,", "self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in", "provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] #", "self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct", "self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def get_input_embeddings(self): return 
self.bert_with_skim_embed.embeddings.word_embeddings def", "in range(num_attention_heads) if idx not in self.pruned_heads] attention_probs = torch.index_select(attention_probs,", "if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @dataclass", "computing the token classification loss. Indices should be in ``[0,", "hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SkimformerForMaskedLM(SkimformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"] _keys_to_ignore_on_load_missing", "isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class SkimmingMaskPreTrainedModel(PreTrainedModel):", "= x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0,", "BertOutput(config) def forward( self, hidden_states, attention_probs, head_mask=None, ): attention_output =", "index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params", "torch.zeros(input_shape, dtype=torch.long, device=device) if bbox is None: bbox = torch.zeros(tuple(list(input_shape)", "w_position_embeddings = self.w_position_projection(w_position_embeddings) two_dim_pos_embeddings = ( left_position_embeddings + upper_position_embeddings +", "input_shape = inputs_embeds.size()[:-1] else: raise ValueError(\"You have to specify either", "import Optional, Tuple import math import torch from torch import", "head_mask.to(dtype=next(self.parameters()).dtype) else: head_mask = [None] * self.config.num_hidden_layers embedding_output = self.embeddings(", "from the TF version which uses truncated_normal for initialization #", "self.x_position_projection(right_position_embeddings) lower_position_embeddings = self.y_position_projection(lower_position_embeddings) h_position_embeddings = self.h_position_projection(h_position_embeddings) w_position_embeddings = self.w_position_projection(w_position_embeddings)", "1, 3) def forward( self, hidden_layout_states, attention_mask=None, ): key_layer =", "return self.bert_with_skim_embed.embeddings.word_embeddings def set_input_embeddings(self, value): self.bert_with_skim_embed.embeddings.word_embeddings = value def forward(", "if attention_mask.dim() == 3: active_loss = (torch.sum(attention_mask, dim=-1)).view(-1) > 0", "bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, extended_attention_mask,", "self.init_weights() def get_input_embeddings(self): return self.text_embeddings.word_embeddings def set_input_embeddings(self, value): self.text_embeddings.word_embeddings =", "__init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.skimformer = SkimformerModel(config, add_pooling_layer=False)", "indices set to ``-100`` are ignored (masked), the loss is", "token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r\"\"\"", "last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SkimmingMaskModel(SkimmingMaskPreTrainedModel): def __init__(self,", "elif input_ids is not None: input_shape = input_ids.size() batch_size, seq_length", ":] skim_attention_mask = (1.0 - 
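

# Illustrative sketch of the factorization implemented above: SkimAttention turns
# layout embeddings into one set of attention probabilities, and the text layers
# reuse them, paying only for the value projection per layer. Config fields mirror
# the attribute accesses above; the values are arbitrary assumptions.
def _demo_skim_attention_shapes():
    config = SkimformerConfig(
        hidden_layout_size=64,
        num_attention_heads=4,
        skim_attention_head_size=16,
    )
    skim = SkimAttention(config).eval()  # eval() disables the attention dropout
    layout_states = torch.randn(2, 8, 64)  # (batch, seq_len, hidden_layout_size)
    probs = skim(layout_states)
    assert probs.shape == (2, 4, 8, 8)  # (batch, heads, seq_len, seq_len)
    assert torch.allclose(probs.sum(-1), torch.ones(2, 4, 8))  # rows are distributions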
class SkimformerLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = SkimformerAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_probs, head_mask=None):
        attention_output = self.attention(
            hidden_states,
            attention_probs,
            head_mask,
        )
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        return layer_output

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class SkimformerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([SkimformerLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(
        self,
        hidden_states,
        attention_probs,
        head_mask=None,
        output_hidden_states=False,
        return_dict=None,
    ):
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            if getattr(self.config, "gradient_checkpointing", False):

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                layer_output = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_probs,
                    layer_head_mask,
                )
            else:
                layer_output = layer_module(
                    hidden_states,
                    attention_probs,
                    layer_head_mask,
                )
            hidden_states = layer_output

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    all_hidden_states,
                ]
                if v is not None
            )
        return SkimformerEncoderOutput(
            hidden_states=hidden_states,
            all_hidden_states=all_hidden_states,
        )


class SkimformerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SkimformerConfig
    base_model_prefix = "skimformer"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
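

# Illustrative sketch: because SkimformerEncoder consumes precomputed attention
# probabilities instead of recomputing QK^T in every layer, a hand-built uniform
# attention matrix is enough to drive a forward pass. Assumes SkimformerConfig
# provides BERT-like defaults (hidden_act, layer_norm_eps, dropout probabilities).
def _demo_encoder_with_uniform_probs():
    config = SkimformerConfig(
        hidden_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        attention_head_size=16,
        intermediate_size=128,
    )
    encoder = SkimformerEncoder(config).eval()
    batch, seq_len = 2, 8
    hidden = torch.randn(batch, seq_len, 64)
    uniform_probs = torch.full((batch, 4, seq_len, seq_len), 1.0 / seq_len)
    out = encoder(hidden, uniform_probs, return_dict=True)
    assert out.hidden_states.shape == (batch, seq_len, 64)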
@dataclass
class SkimformerModelOutput(ModelOutput):
    """
    Output type of :class:`~SkimformerModel`.

    Args:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the last layer of the model.
        pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token) further processed by a
            Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
            prediction (classification) objective during pretraining.
        attentions (:obj:`torch.FloatTensor`, `optional`, returned when ``output_attentions=True`` is passed or when
            ``config.output_attentions=True``):
            :obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads. Skimformer computes a single skim-attention map that is shared by every layer.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is
            passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each
            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the
            output of each layer plus the initial embedding outputs.
    """

    last_hidden_state: torch.FloatTensor = None
    pooler_output: torch.FloatTensor = None
    attentions: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None


class SkimformerModel(SkimformerPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.use_1d_positions = config.use_1d_positions
        self.text_embeddings = SkimformerTextEmbeddings(config)
        if self.use_1d_positions:
            self.one_dim_pos_embeddings = Skimformer1DPositionEmbeddings(config)
        else:
            self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config)

        self.contextualize_2d_positions = config.contextualize_2d_positions
        if self.contextualize_2d_positions:
            self.layout_encoder = BertEncoder(
                BertConfig(
                    hidden_size=config.hidden_layout_size,
                    num_hidden_layers=config.num_hidden_layers_layout_encoder,
                    num_attention_heads=config.num_attention_heads_layout_encoder,
                    intermediate_size=config.intermediate_size,
                    hidden_act=config.hidden_act,
                    hidden_dropout_prob=config.hidden_dropout_prob,
                    attention_probs_dropout_prob=config.attention_probs_dropout_prob,
                    max_position_embeddings=config.max_2d_position_embeddings,
                    initializer_range=config.initializer_range,
                    layer_norm_eps=config.layer_norm_eps,
                    gradient_checkpointing=config.gradient_checkpointing,
                )
            )
        self.skim_attention = SkimAttention(config)
        self.encoder = SkimformerEncoder(config)
        self.pooler = BertPooler(config) if add_pooling_layer else None

        self.init_weights()

    def get_input_embeddings(self):
        return self.text_embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.text_embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
        class PreTrainedModel.
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def forward(
        self,
        input_ids=None,
        bbox=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        if bbox is None:
            bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves, in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # Prepare head mask if needed:
        # 1.0 in head_mask indicates we keep the head; attention_probs has shape bsz x n_heads x N x N.
        # Input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length].
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        text_embedding_output = self.text_embeddings(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
        )

        if self.use_1d_positions:
            pos_embedding_output = self.one_dim_pos_embeddings(
                input_shape=input_shape,
                device=device,
                position_ids=position_ids,
            )
        else:
            pos_embedding_output = self.two_dim_pos_embeddings(
                bbox=bbox,
            )
        if self.contextualize_2d_positions:
            pos_embedding_output = self.layout_encoder(
                hidden_states=pos_embedding_output,
            )[0]

        skim_attention_output = self.skim_attention(
            pos_embedding_output,
            attention_mask=extended_attention_mask,
        )
        encoder_outputs = self.encoder(
            text_embedding_output,
            skim_attention_output,
            head_mask=head_mask,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            outputs = (sequence_output, pooled_output)
            if output_attentions:
                outputs = outputs + (skim_attention_output,)
            if output_hidden_states:
                outputs = outputs + encoder_outputs[1:]
            return outputs

        return SkimformerModelOutput(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            attentions=skim_attention_output if output_attentions else None,
            hidden_states=encoder_outputs.all_hidden_states,
        )
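

# Illustrative end-to-end sketch of SkimformerModel.forward on a toy "page".
# All config values are arbitrary assumptions chosen so that the shapes line up
# (and rely on SkimformerConfig supplying BERT-like defaults for the rest);
# real checkpoints ship their own configuration.
def _demo_skimformer_forward():
    config = SkimformerConfig(
        vocab_size=100,
        type_vocab_size=2,
        hidden_size=64,
        hidden_layout_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        attention_head_size=16,
        skim_attention_head_size=16,
        intermediate_size=128,
        max_2d_position_embeddings=1024,
        use_1d_positions=False,
        contextualize_2d_positions=False,
    )
    model = SkimformerModel(config).eval()
    input_ids = torch.randint(0, 100, (1, 8))
    # One 50x50 box per token, laid out left to right on the 0-1000 grid.
    bbox = torch.tensor([[[60 * i, 40, 60 * i + 50, 90] for i in range(8)]])
    with torch.no_grad():
        outputs = model(input_ids=input_ids, bbox=bbox, return_dict=True)
    assert outputs.last_hidden_state.shape == (1, 8, 64)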
class SkimformerForMaskedLM(SkimformerPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)
        self.skimformer = SkimformerModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)
        self.init_weights()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    def forward(
        self,
        input_ids=None,
        bbox=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
            (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.skimformer(
            input_ids,
            bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class SkimformerForTokenClassification(SkimformerPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.skimformer = SkimformerModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        bbox=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.skimformer(
            input_ids,
            bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss.
            if attention_mask is not None:
                if attention_mask.dim() == 3:
                    active_loss = (torch.sum(attention_mask, dim=-1)).view(-1) > 0
                else:
                    active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
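

# Illustrative sketch of the ``-100`` labelling convention documented above for
# SkimformerForMaskedLM: only masked positions contribute to the loss. The mask
# token id (3) is a stand-in, not taken from any real tokenizer.
def _demo_mlm_labels():
    input_ids = torch.tensor([[5, 17, 42, 8]])
    labels = input_ids.clone()
    masked = torch.tensor([[False, True, False, False]])
    labels[~masked] = -100  # ignored by CrossEntropyLoss
    input_ids[masked] = 3   # hypothetical [MASK] id
    # model(input_ids=input_ids, bbox=bbox, labels=labels).loss now only
    # scores the prediction at position 1.
    return input_ids, labels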
\"\"\" config_class = SkimmingMaskConfig base_model_prefix = \"skimmingmask\"", "head_mask=None, ): value_layer = self.transpose_for_scores(self.value(hidden_states)) # Mask heads if we", "skim_attention_output, head_mask=head_mask, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output =", "module.bias is not None: module.bias.data.zero_() @dataclass class SkimformerModelOutput(ModelOutput): \"\"\" Output", "self.bert_with_skim_embed = BertWithSkimEmbedModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size,", "inputs_embeds at the same time\") elif input_ids is not None:", "+ lower_position_embeddings + h_position_embeddings + w_position_embeddings ) token_type_embeddings = self.token_type_embeddings(token_type_ids)", "spatial_pos_embedding_output = self.two_dim_pos_embeddings(bbox=bbox) if self.contextualize_2d_positions: spatial_pos_embedding_output = self.layout_encoder(hidden_states=spatial_pos_embedding_output)[0] skim_attention_output =", "return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output)", "self.attention_head_size = config.skim_attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.query =", "downloading and loading pretrained models. \"\"\" config_class = SkimformerConfig base_model_prefix", "_keys_to_ignore_on_load_missing = [r\"position_ids\"] def _init_weights(self, module): \"\"\" Initialize the weights", "std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and", "heads_to_prune): \"\"\" Prunes heads of the model. heads_to_prune: dict of", "if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if", "= self.text_embeddings( input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) if self.use_1d_positions: pos_embedding_output =", "hidden_states, attention_probs, head_mask, ) layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim,", "between \"query\" and \"key\" to get the raw attention scores.", "is not None else None if not return_dict: return (sequence_output,", "the attention mask is (precomputed for all layers in BertModel", "`optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): :obj:`torch.FloatTensor`", "sequence_length)`. 
Attentions weights after the attention softmax, used to compute", "params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads)", "BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput, ) from transformers.models.bert.modeling_bert import ( BertConfig, BertEmbeddings,", "from the bounding box coordinates.\"\"\" def __init__(self, config): super().__init__() self.x_position_embeddings", "\"bert\" else LayoutLMEmbeddings(config) self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions = config.contextualize_2d_positions if", "entire tokens to attend to, which might # seem a", "layer_norm_eps=config.layer_norm_eps, gradient_checkpointing=config.gradient_checkpointing, ) ) self.skim_attention = SkimAttention(config) self.top_k = config.top_k", "= input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device", "self.get_head_mask(head_mask, self.config.num_hidden_layers) text_embedding_output = self.text_embeddings( input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) if", "- extended_attention_mask) * -10000.0 if head_mask is not None: if", "-1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) head_mask", "and token_type embeddings.\"\"\" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size,", "the model at the output of each layer plus the", "device=device) extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0", "else: text_embedding_output = self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, )", "[num_hidden_layers x num_heads] # and head_mask is converted to shape", "dot product between \"query\" and \"key\" to get the raw", "None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids", "= self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels", "= SkimmingMaskConfig base_model_prefix = \"skimmingmask\" _keys_to_ignore_on_load_missing = [r\"position_ids\"] def _init_weights(self,", "self.config.use_return_dict outputs = self.skimming_mask_model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask,", "ValueError(\"You have to specify either input_ids or inputs_embeds\") assert (", "nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.x_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.y_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size)", "if self.use_1d_positions: self.one_dim_pos_embeddings = Skimformer1DPositionEmbeddings(config) else: self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions", "lower_position_embeddings + h_position_embeddings + w_position_embeddings ) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings", "self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class SkimformerEncoder(nn.Module): def", "= None hidden_states: Optional[Tuple[torch.FloatTensor]] = None class SkimformerModel(SkimformerPreTrainedModel): 
def __init__(self,", "= inputs_embeds.size()[:-1] else: raise ValueError(\"You have to specify either input_ids", "base_model_prefix = \"skimmingmask\" _keys_to_ignore_on_load_missing = [r\"position_ids\"] def _init_weights(self, module): \"\"\"", "BertIntermediate(config) self.output = BertOutput(config) def forward( self, hidden_states, attention_probs, head_mask=None,", "self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, bbox=None): if self.degrade_2d_positions: try: x_center", "= getattr(config, \"position_embedding_type\", \"absolute\") def transpose_for_scores(self, x): new_x_shape = x.size()[:-1]", "= bbox.size() assert ( len(bbox_shape) == 3 ), \"`bbox` has", "= self.y_position_projection(lower_position_embeddings) h_position_embeddings = self.h_position_projection(h_position_embeddings) w_position_embeddings = self.w_position_projection(w_position_embeddings) two_dim_pos_embeddings =", "nn.ModuleList([SkimformerLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, attention_probs,", "None: loss_fct = CrossEntropyLoss() # Only keep active parts of", ") hidden_states = layer_output if output_hidden_states: all_hidden_states = all_hidden_states +", "nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.degrade_2d_positions = config.degrade_2d_positions if", "is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if", "shape bsz x n_heads x N x N # input", "self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = SkimformerAttention(config) self.intermediate", "else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class", "shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]", "attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits", "= self.x_position_projection(right_position_embeddings) lower_position_embeddings = self.y_position_projection(lower_position_embeddings) h_position_embeddings = self.h_position_projection(h_position_embeddings) w_position_embeddings =", "nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None):", "self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) # project into", "shape :obj:`(batch_size, sequence_length, hidden_size)`. 
Hidden-states of the model at the", ":, 2]) // 2 y_center = (bbox[:, :, 1] +", "config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.y_position_embeddings", "= nn.Linear(config.hidden_layout_size, config.hidden_size) self.w_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size,", "seem a bit unusual, but is taken from the original", "self.skimming_mask_model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states,", "word, position and token_type embeddings.\"\"\" def __init__(self, config): super().__init__() self.word_embeddings", "config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self,", "is not None else inputs_embeds.device if attention_mask is None: attention_mask", "is not None: input_shape = input_ids.size() elif inputs_embeds is not", "== \"bert\" else LayoutLMEmbeddings(config) self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions = config.contextualize_2d_positions", "shape: {}\".format(bbox_shape) device = input_ids.device if input_ids is not None", "skim_attention_mask) * -10000.0 encoder_outputs = self.encoder( text_embedding_output, skim_attention_mask, head_mask=head_mask, output_attentions=output_attentions,", "not None else self.config.use_return_dict if input_ids is not None and", "nn.Linear) and module.bias is not None: module.bias.data.zero_() @dataclass class SkimformerModelOutput(ModelOutput):", "logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss", "self.one_dim_pos_embeddings( input_shape=input_shape, device=device, position_ids=position_ids, ) else: pos_embedding_output = self.two_dim_pos_embeddings( bbox=bbox,", "= self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) embeddings =", "None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is", "outputs. 
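

# Illustrative helper, an assumption rather than code from the original module:
# the x/y embedding tables above index raw coordinates, so pixel-space boxes must
# first be mapped onto the 0-1000 grid that the IndexError messages refer to.
def _normalize_bbox(box, page_width, page_height):
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / page_width),
        int(1000 * y0 / page_height),
        int(1000 * x1 / page_width),
        int(1000 * y1 / page_height),
    ]
# For example, _normalize_bbox([50, 100, 200, 140], 612, 792) maps a box on a
# US-letter page (in points) onto the 0-1000 grid.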
class SkimmingMaskPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SkimmingMaskConfig
    base_model_prefix = "skimmingmask"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


class SkimmingMaskModel(SkimmingMaskPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.core_model_type = config.core_model_type
        self.embeddings = BertEmbeddings(config) if config.core_model_type == "bert" else LayoutLMEmbeddings(config)
        self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config)
        self.contextualize_2d_positions = config.contextualize_2d_positions
        if self.contextualize_2d_positions:
            self.layout_encoder = BertEncoder(
                BertConfig(
                    hidden_size=config.hidden_layout_size,
                    num_hidden_layers=config.num_hidden_layers_layout_encoder,
                    num_attention_heads=config.num_attention_heads_layout_encoder,
                    intermediate_size=config.intermediate_size,
                    hidden_act=config.hidden_act,
                    hidden_dropout_prob=config.hidden_dropout_prob,
                    attention_probs_dropout_prob=config.attention_probs_dropout_prob,
                    max_position_embeddings=config.max_2d_position_embeddings,
                    initializer_range=config.initializer_range,
                    layer_norm_eps=config.layer_norm_eps,
                    gradient_checkpointing=config.gradient_checkpointing,
                )
            )
        self.skim_attention = SkimAttention(config)
        self.top_k = config.top_k
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config) if add_pooling_layer else None

        self.init_weights()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def forward(
        self,
        input_ids=None,
        bbox=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        if bbox is None:
            bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)

        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        if self.core_model_type == "bert":
            text_embedding_output = self.embeddings(
                input_ids=input_ids,
                position_ids=position_ids,
                token_type_ids=token_type_ids,
                inputs_embeds=inputs_embeds,
            )
        else:
            text_embedding_output = self.embeddings(
                input_ids=input_ids,
                bbox=bbox,
                position_ids=position_ids,
                token_type_ids=token_type_ids,
                inputs_embeds=inputs_embeds,
            )

        spatial_pos_embedding_output = self.two_dim_pos_embeddings(bbox=bbox)
        if self.contextualize_2d_positions:
            spatial_pos_embedding_output = self.layout_encoder(hidden_states=spatial_pos_embedding_output)[0]

        skim_attention_output = self.skim_attention(
            spatial_pos_embedding_output,
            attention_mask=extended_attention_mask,
        )
        # Keep, for each query, only the top_k most-attended keys and convert the
        # result into an additive mask for the text encoder.
        topk_idx = torch.topk(skim_attention_output, self.top_k, -1).indices
        skim_attention_mask = torch.zeros(skim_attention_output.shape, device=device)
        skim_attention_mask = skim_attention_mask.scatter(-1, topk_idx, 1.0)
        skim_attention_mask = (1.0 - skim_attention_mask) * -10000.0

        encoder_outputs = self.encoder(
            text_embedding_output,
            skim_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )


class SkimmingMaskForTokenClassification(SkimmingMaskPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.skimming_mask_model = SkimmingMaskModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def get_input_embeddings(self):
        return self.skimming_mask_model.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.skimming_mask_model.embeddings.word_embeddings = value

    def forward(
        self,
        input_ids=None,
        bbox=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.skimming_mask_model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss.
            if attention_mask is not None:
                if attention_mask.dim() == 3:
                    active_loss = (torch.sum(attention_mask, dim=-1)).view(-1) > 0
                else:
                    active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
hidden_states (:obj:`tuple(torch.FloatTensor)`,", ") logger = logging.getLogger(__name__) SkimformerEncoderOutput = namedtuple( \"SkimformerEncoderOutput\", [\"hidden_states\", \"all_hidden_states\"],", "embeddings = self.dropout(embeddings) return embeddings class SkimAttention(nn.Module): def __init__(self, config):", "input_ids.device else: input_shape = inputs_embeds.size()[:-1] device = inputs_embeds.device device =", "Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings", "assert ( len(bbox_shape) == 3 ), \"`bbox` has to be", "the loss if attention_mask is not None: active_loss = attention_mask.view(-1)", "``None``, then ``num_predict`` corresponds to ``sequence_length``. pooler_output (:obj:`torch.FloatTensor` of shape", "self.config.num_hidden_layers embedding_output = self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, )", "loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))", "outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is", "loss = None if labels is not None: loss_fct =", "attention_mask=None, ): key_layer = self.transpose_for_scores(self.key(hidden_layout_states)) query_layer = self.transpose_for_scores(self.query(hidden_layout_states)) # Take", "x batch x num_heads x seq_length x seq_length] head_mask =", "list of heads to prune in this layer} See base", "+ (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1,", "self.get_extended_attention_mask(attention_mask, input_shape, device) # Prepare head mask if needed #", "embeddings left_position_embeddings = self.x_position_projection(left_position_embeddings) upper_position_embeddings = self.y_position_projection(upper_position_embeddings) right_position_embeddings = self.x_position_projection(right_position_embeddings)", "text_embedding_output = self.text_embeddings( input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) if self.use_1d_positions: pos_embedding_output", "inputs_embeds.device device = input_ids.device if input_ids is not None else", "= torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: token_type_ids =", "not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length", "embeddings = self.dropout(embeddings) return embeddings class BertWithSkimEmbedEmbeddings(nn.Module): \"\"\"Construct the embeddings", "return embeddings class BertWithSkimEmbedEmbeddings(nn.Module): \"\"\"Construct the embeddings from word, position", "2 ), \"`input_ids` has to be of shape `[batch_size, sequence_length]`,", "bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ):", "initializer_range=config.initializer_range, layer_norm_eps=config.layer_norm_eps, gradient_checkpointing=config.gradient_checkpointing, ) ) self.skim_attention = SkimAttention(config) self.top_k =", "= self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs =", "= torch.topk(skim_attention_output, self.top_k, -1).indices skim_attention_mask = torch.zeros(skim_attention_output.shape, device=device) skim_attention_mask =", "embeddings and one for the output of each layer) of", 
"inputs_embeds=inputs_embeds, ) spatial_pos_embedding_output = self.two_dim_pos_embeddings(bbox=bbox) if self.contextualize_2d_positions: spatial_pos_embedding_output = self.layout_encoder(hidden_states=spatial_pos_embedding_output)[0]", "attention softmax, used to compute the weighted average in the", "x n_heads x N x N # input head_mask has", "of hidden-states at the last layer of the model. ``num_predict``", "None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states =", "text_embedding_output, skim_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0]", "bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:,", "== 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss", "e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])", "= all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is", "attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) *", "forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None,", "query_layer = self.transpose_for_scores(self.query(hidden_layout_states)) # Take the dot product between \"query\"", "self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward( self, input_ids=None, bbox=None,", "input_tensor) return hidden_states class SkimformerAttention(nn.Module): def __init__(self, config): super().__init__() self.self", "config.contextualize_2d_positions: self.layout_encoder = BertEncoder( BertConfig( hidden_size=config.hidden_layout_size, num_hidden_layers=config.num_hidden_layers_layout_encoder, num_attention_heads=config.num_attention_heads_layout_encoder, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act,", "nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward( self, input_ids=None,", "+ y_center_position_embeddings else: try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings", "output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states", "attention_mask # Normalize the attention scores to probabilities. attention_probs =", "config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.attention_head_size self.all_head_size =", "prediction (classification) objective during pretraining. 
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when", "head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output =", "self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None", "skim_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output", "module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None:", "prune_linear_layer, ) from transformers.modeling_outputs import ( BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput, )", "forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None): if input_ids is not None: input_shape", "= nn.Dropout(config.hidden_dropout_prob) def forward(self, bbox=None): if self.degrade_2d_positions: try: x_center =", "n_heads x N x N # input head_mask has shape", "= inputs_embeds + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings)", "w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) embeddings", "labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return", "= self.encoder( text_embedding_output, skim_attention_output, head_mask=head_mask, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output =", "0-1000 range.\") from e embeddings = x_center_position_embeddings + y_center_position_embeddings else:", ":, 2] - bbox[:, :, 0]) embeddings = ( left_position_embeddings", "head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output", "set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): \"\"\" Prunes", "SkimformerConfig base_model_prefix = \"skimformer\" def _init_weights(self, module): \"\"\" Initialize the", "sequence_length]`, but got shape: {}\".format(input_shape) if bbox is not None:", "bit unusual, but is taken from the original Transformer paper.", "is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1]", "device=device) if bbox is None: bbox = torch.zeros(tuple(list(input_shape) + [4]),", "we just need to make it broadcastable to all heads.", "config.contextualize_2d_positions if config.contextualize_2d_positions: self.layout_encoder = BertEncoder( BertConfig( hidden_size=config.hidden_layout_size, num_hidden_layers=config.num_hidden_layers_layout_encoder, num_attention_heads=config.num_attention_heads_layout_encoder,", "BertWithSkimEmbedForTokenClassification(BertWithSkimEmbedPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"] def __init__(self, config): super().__init__(config) self.num_labels =", "nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size,", "dtype=torch.long, device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) words_embeddings", "self.dropout(embeddings) return embeddings class Skimformer1DPositionEmbeddings(nn.Module): \"\"\"Construct 
sequential position embeddings.\"\"\" def", "self.dense = nn.Linear(all_head_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout =", "input_ids is not None else inputs_embeds.device if attention_mask is None:", "device=device, position_ids=position_ids, ) else: pos_embedding_output = self.two_dim_pos_embeddings( bbox=bbox, ) if", "else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device)", "parts of the loss if attention_mask is not None: active_loss", "bbox is not None: bbox_shape = bbox.size() assert ( len(bbox_shape)", "): all_hidden_states = () if output_hidden_states else None for i,", "= BertEmbeddings(config) if self.core_model_type == \"bert\" else LayoutLMEmbeddings(config) self.two_dim_pos_embeddings =", "nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)", "self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings =", "prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper", "active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss =", "get_input_embeddings(self): return self.bert_with_skim_embed.embeddings.word_embeddings def set_input_embeddings(self, value): self.bert_with_skim_embed.embeddings.word_embeddings = value def", "time\") elif input_ids is not None: input_shape = input_ids.size() batch_size,", "of the model. heads_to_prune: dict of {layer_num: list of heads", "sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if", "= nn.Linear(config.hidden_size, self.all_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] +", "labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict", "position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0]", "Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids``", "sequence_length, 4]`, but got shape: {}\".format(bbox_shape) device = input_ids.device if", "class SkimformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer", "config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.skim_attention_head_size self.all_head_size =", "2, 1, 3) def forward( self, hidden_layout_states, attention_mask=None, ): key_layer", "not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict", "nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def get_input_embeddings(self): return self.bert_with_skim_embed.embeddings.word_embeddings", "unusual, but is taken from the original Transformer paper. 
attention_probs", "truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module,", "= x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self,", "super().__init__() self.self = SkimformerSelfAttention(config) self.output = SkimformerSelfOutput(config) self.pruned_heads = set()", "when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of", "got shape: {}\".format(bbox_shape) device = input_ids.device if input_ids is not", "the loss is only computed for the tokens with labels", "= self.dropout(embeddings) return embeddings class Skimformer1DPositionEmbeddings(nn.Module): \"\"\"Construct sequential position embeddings.\"\"\"", "BertEmbeddings(config) if self.core_model_type == \"bert\" else LayoutLMEmbeddings(config) self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config)", ") sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output)", "Transformer paper. attention_probs = self.dropout(attention_probs) # return the attention probabilities", "= self.y_position_projection(upper_position_embeddings) right_position_embeddings = self.x_position_projection(right_position_embeddings) lower_position_embeddings = self.y_position_projection(lower_position_embeddings) h_position_embeddings =", "the last layer of the model. ``num_predict`` corresponds to ``target_mapping.shape[1]``.", "if position_ids is None: position_ids = torch.arange( 0, seq_length, dtype=torch.long,", "hidden-states at the last layer of the model. ``num_predict`` corresponds", "output) if masked_lm_loss is not None else output return MaskedLMOutput(", "2] - bbox[:, :, 0]) # project into same dimension", "3 ), \"`bbox` has to be of shape `[batch_size, sequence_length,", "for computing the masked language modeling loss. Indices should be", "isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class BertWithSkimEmbedPreTrainedModel(PreTrainedModel):", "head_mask indicate we keep the head # attention_probs has shape", "if head_mask is not None else None if getattr(self.config, \"gradient_checkpointing\",", "= [r\"position_ids\", r\"predictions.decoder.bias\"] def __init__(self, config): super().__init__(config) self.skimformer = SkimformerModel(config,", "not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape else:", "handle weights initialization and a simple interface for downloading and", "+ output) if loss is not None else output return", "attention_output = self.attention( hidden_states, attention_probs, head_mask, ) layer_output = apply_chunking_to_forward(", "loss_fct = CrossEntropyLoss() if attention_mask is not None: if attention_mask.dim()", "a bit unusual, but is taken from the original Transformer", "self.two_dim_pos_embeddings(bbox=bbox) if self.contextualize_2d_positions: spatial_pos_embedding_output = self.layout_encoder(hidden_states=spatial_pos_embedding_output)[0] skim_attention_output = self.skim_attention( spatial_pos_embedding_output,", "output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output)", "attention_probs = attention_probs * head_mask # Softmax(QK^T/sqrt(d)) . 
V context_layer", ":, 2] - bbox[:, :, 0]) # project into same", "if output_hidden_states: outputs = outputs + encoder_outputs[1:] return outputs return", "# Only keep active parts of the loss if attention_mask", "intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, hidden_dropout_prob=config.hidden_dropout_prob, attention_probs_dropout_prob=config.attention_probs_dropout_prob, max_position_embeddings=config.max_2d_position_embeddings, initializer_range=config.initializer_range, layer_norm_eps=config.layer_norm_eps, gradient_checkpointing=config.gradient_checkpointing, ) )", "has to be of shape `[batch_size, sequence_length]`, but got shape:", "= torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels)", "position_ids=None, inputs_embeds=None, ): if input_ids is not None: input_shape =", "(self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3)", "torch.index_select(attention_probs, 1, indices) self_output = self.self( hidden_states, attention_probs, head_mask, )", "return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads )", "3] - bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2]", "token) further processed by a Linear layer and a Tanh", "-2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not", "and token_type embeddings.\"\"\" def __init__(self, config): super().__init__() self.max_position_embeddings = config.max_position_embeddings", "- skim_attention_mask) * -10000.0 encoder_outputs = self.encoder( text_embedding_output, skim_attention_mask, head_mask=head_mask,", "interface for downloading and loading pretrained models. \"\"\" config_class =", "output_hidden_states = ( output_hidden_states if output_hidden_states is not None else", "the masked language modeling loss. Indices should be in ``[-100,", "False self.LayerNorm = LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self,", "* self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_probs,", "cannot specify both input_ids and inputs_embeds at the same time\")", "torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_probs, layer_head_mask, ) else: layer_output = layer_module(", "last layer of the model. ``num_predict`` corresponds to ``target_mapping.shape[1]``. 
If", "self.layer = nn.ModuleList([SkimformerLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self,", "hidden_states, attention_probs, head_mask, ) attention_output = self.output(self_output, hidden_states) return attention_output", "of :obj:`torch.FloatTensor` (one for the output of the embeddings and", "2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.to(dtype=next(self.parameters()).dtype) else: head_mask =", "config, add_pooling_layer=True): super().__init__(config) self.config = config self.core_model_type = config.core_model_type self.embeddings", "self.encoder = SkimformerEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None", "None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask =", "``-100`` are ignored (masked), the loss is only computed for", "self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings embeddings", "num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used", "\"\"\" config_class = BertWithSkimEmbedConfig base_model_prefix = \"bertwithskimembed\" _keys_to_ignore_on_load_missing = [r\"position_ids\"]", "inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds +", "to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping", "return custom_forward layer_output = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_probs, layer_head_mask, )", "custom_forward(*inputs): return module(*inputs) return custom_forward layer_output = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states,", "self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return", "// 2 y_center = (bbox[:, :, 1] + bbox[:, :,", "None: bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device) # We", "embeddings = x_center_position_embeddings + y_center_position_embeddings else: try: left_position_embeddings = self.x_position_embeddings(bbox[:,", "None, hidden_states=encoder_outputs.all_hidden_states, ) class BertWithSkimEmbedModel(BertWithSkimEmbedPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config)", "loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class SkimformerForTokenClassification(SkimformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r\"pooler\"]", "bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device) extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)", "# We can provide a self-attention mask of dimensions [batch_size,", "+ (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None", "self.encoder( text_embedding_output, skim_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output =", "return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class BertWithSkimEmbedForTokenClassification(BertWithSkimEmbedPreTrainedModel): _keys_to_ignore_on_load_unexpected", 
"self.contextualize_2d_positions: self.layout_encoder = BertEncoder( BertConfig( hidden_size=config.hidden_layout_size, num_hidden_layers=config.num_hidden_layers_layout_encoder, num_attention_heads=config.num_attention_heads_layout_encoder, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act,", "raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores =", "classification loss. Indices should be in ``[0, ..., config.num_labels -", "last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SkimformerForMaskedLM(SkimformerPreTrainedModel): _keys_to_ignore_on_load_unexpected =", "= SkimformerTextEmbeddings(config) if self.use_1d_positions: self.one_dim_pos_embeddings = Skimformer1DPositionEmbeddings(config) else: self.two_dim_pos_embeddings =", "self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def get_input_embeddings(self):", "input_shape elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size,", "for the tokens with labels in ``[0, ..., config.vocab_size]`` \"\"\"", "= (1.0 - skim_attention_mask) * -10000.0 encoder_outputs = self.encoder( text_embedding_output,", "super().__init__(config) self.config = config self.use_1d_positions = config.use_1d_positions self.text_embeddings = SkimformerTextEmbeddings(config)", "= self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor)", "computing the masked language modeling loss. Indices should be in", "only computed for the tokens with labels in ``[0, ...,", "is not None and inputs_embeds is not None: raise ValueError(\"You", "not None: attention_probs = attention_probs * head_mask # Softmax(QK^T/sqrt(d)) .", "head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)", "attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire", "extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output", "layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,)", "SkimformerAttention(nn.Module): def __init__(self, config): super().__init__() self.self = SkimformerSelfAttention(config) self.output =", "= SkimformerEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None self.init_weights()", "hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states +", ") class BertWithSkimEmbedModel(BertWithSkimEmbedPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config =", "def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.skimformer = SkimformerModel(config,", "``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one", "original Transformer paper. 
attention_probs = self.dropout(attention_probs) # return the attention", "= nn.Dropout(config.hidden_dropout_prob) def forward(self, input_shape, device, position_ids=None): seq_length = input_shape[1]", "set() def prune_heads(self, heads): if len(heads) == 0: return heads,", "if output_hidden_states is not None else self.config.output_hidden_states ) return_dict =", "token_type_embeddings ) embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings", "h_position_embeddings + w_position_embeddings ) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = (", "specify both input_ids and inputs_embeds at the same time\") elif", "transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x =", "num_heads] # and head_mask is converted to shape [num_hidden_layers x", "nn.Embedding(config.type_vocab_size, config.hidden_size) self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)", "self.core_model_type == \"bert\" else LayoutLMEmbeddings(config) self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions =", "dtype=torch.long, device=device) extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask =", "all_head_size = config.num_attention_heads * config.attention_head_size self.dense = nn.Linear(all_head_size, config.hidden_size) self.LayerNorm", "from the original Transformer paper. attention_probs = self.dropout(attention_probs) # return", "to specify either input_ids or inputs_embeds\") device = input_ids.device if", "[4]), dtype=torch.long, device=device) extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask", "hidden_states = layer_output if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,)", "= set() def prune_heads(self, heads): if len(heads) == 0: return", "output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is", "if loss is not None else output return TokenClassifierOutput( loss=loss,", "attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is", "self.y_position_embeddings(y_center) except IndexError as e: raise IndexError(\"The :obj:`bbox` coordinate values", "self.core_model_type = config.core_model_type self.embeddings = BertEmbeddings(config) if self.core_model_type == \"bert\"", "= None pooler_output: torch.FloatTensor = None attentions: Optional[torch.FloatTensor] = None", "mask if needed # 1.0 in head_mask indicate we keep", "= nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings =", "sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not", "super().__init__(config) self.config = config self.embeddings = BertWithSkimEmbedEmbeddings(config) self.encoder = BertEncoder(config)", "head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = 
return_dict", "attention_mask is not None: # Apply the attention mask is", "class Skimformer2DPositionEmbeddings(nn.Module): \"\"\"Construct the layout embeddings from the bounding box", "return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_probs,", "either input_ids or inputs_embeds\") assert ( len(input_shape) == 2 ),", "hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SkimmingMaskModel(SkimmingMaskPreTrainedModel): def __init__(self, config, add_pooling_layer=True):", "BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SkimformerForMaskedLM(SkimformerPreTrainedModel): _keys_to_ignore_on_load_unexpected", "self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\") def transpose_for_scores(self,", "input_shape else: raise ValueError(\"You have to specify either input_ids or", "embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class SkimAttention(nn.Module):", "as text embeddings left_position_embeddings = self.x_position_projection(left_position_embeddings) upper_position_embeddings = self.y_position_projection(upper_position_embeddings) right_position_embeddings", "] if v is not None ) return SkimformerEncoderOutput( hidden_states=hidden_states,", "self.all_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)", "abstract class to handle weights initialization and a simple interface", "layers self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)", "embeddings.\"\"\" def __init__(self, config): super().__init__() self.max_position_embeddings = config.max_position_embeddings self.word_embeddings =", "logits = self.classifier(sequence_output) loss = None if labels is not", "token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class", "for computing the token classification loss. Indices should be in", "): output_attentions = output_attentions if output_attentions is not None else", "= \"skimmingmask\" _keys_to_ignore_on_load_missing = [r\"position_ids\"] def _init_weights(self, module): \"\"\" Initialize", "= self.encoder( text_embedding_output, skim_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output", "SkimAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size =", "different from the TF version which uses truncated_normal for initialization", "pretrained models. \"\"\" config_class = SkimformerConfig base_model_prefix = \"skimformer\" def", "pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size", "_keys_to_ignore_on_load_unexpected = [r\"pooler\"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels", "`optional`): Labels for computing the token classification loss. 
Indices should", "is passed or when ``config.output_attentions=True``): :obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads,", "self.skimformer( input_ids, bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states,", "1 self.attention = SkimformerAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config)", "hidden_states class SkimformerAttention(nn.Module): def __init__(self, config): super().__init__() self.self = SkimformerSelfAttention(config)", "config.max_position_embeddings self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)", "- bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] -", "= self.pruned_heads.union(heads) def forward( self, hidden_states, attention_probs, head_mask=None, ): if", "self.all_head_size) self.key = nn.Linear(config.hidden_layout_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type =", "attend to, which might # seem a bit unusual, but", "max_position_embeddings=config.max_2d_position_embeddings, initializer_range=config.initializer_range, layer_norm_eps=config.layer_norm_eps, gradient_checkpointing=config.gradient_checkpointing, ) ) self.skim_attention = SkimAttention(config) self.top_k", "device=device) skim_attention_mask = skim_attention_mask.scatter(-1, topk_idx, 1) skim_attention_mask = skim_attention_mask *", "into same dimension as text embeddings left_position_embeddings = self.x_position_projection(left_position_embeddings) upper_position_embeddings", "to, which might # seem a bit unusual, but is", "from dataclasses import dataclass from typing import Optional, Tuple import", "+ token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings", "config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings,", "config.hidden_layout_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.x_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.y_position_projection", "2, 1, 3) def forward( self, hidden_states, attention_probs, head_mask=None, ):", "SkimformerEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None self.init_weights() def", "input_shape, device, position_ids=None): seq_length = input_shape[1] if position_ids is None:", "self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings =", "from typing import Optional, Tuple import math import torch from", "len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def", "1, indices) self_output = self.self( hidden_states, attention_probs, head_mask, ) attention_output", "layer_head_mask, ) else: layer_output = layer_module( hidden_states, attention_probs, layer_head_mask, )", "self.layout_encoder(hidden_states=spatial_pos_embedding_output)[0] skim_attention_output = 
self.skim_attention( spatial_pos_embedding_output, attention_mask=extended_attention_mask, ) topk_idx = torch.topk(skim_attention_output,", "# Softmax(QK^T/sqrt(d)) . V context_layer = torch.matmul(attention_probs, value_layer) context_layer =", "if not return_dict: return tuple( v for v in [", "= outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss =", "device=device) # We can provide a self-attention mask of dimensions", "def __init__(self, config): super().__init__() all_head_size = config.num_attention_heads * config.attention_head_size self.dense", "if input_ids is not None: input_shape = input_ids.size() else: input_shape", "position_ids=None): seq_length = input_shape[1] if position_ids is None: position_ids =", "position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: token_type_ids =", "outputs + (skim_attention_output, ) if output_hidden_states: outputs = outputs +", "y_center_position_embeddings = self.y_position_embeddings(y_center) except IndexError as e: raise IndexError(\"The :obj:`bbox`", "attention_probs, layer_head_mask, ) else: layer_output = layer_module( hidden_states, attention_probs, layer_head_mask,", "topk_idx = torch.topk(skim_attention_output, self.top_k, -1).indices skim_attention_mask = torch.zeros(skim_attention_output.shape, device=device) skim_attention_mask", "super().__init__(config) self.num_labels = config.num_labels self.skimming_mask_model = SkimmingMaskModel(config, add_pooling_layer=False) self.dropout =", "Apply the attention mask is (precomputed for all layers in", "config.hidden_layout_size) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.degrade_2d_positions", "x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2,", "h_position_embeddings = self.h_position_projection(h_position_embeddings) w_position_embeddings = self.w_position_projection(w_position_embeddings) two_dim_pos_embeddings = ( left_position_embeddings", ":, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:,", "* self.config.num_hidden_layers embedding_output = self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds,", "self.pooler is not None else None if not return_dict: outputs", "layer_output = self.output(intermediate_output, attention_output) return layer_output class SkimformerEncoder(nn.Module): def __init__(self,", "of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first", "else: input_shape = inputs_embeds.size()[:-1] device = inputs_embeds.device device = input_ids.device" ]
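# The skimming-mask variant converts the layout-based skim attention into a hard top-k
# mask before running the text encoder. Below is a minimal, self-contained sketch of that
# masking step, assuming the scores are already softmax-normalised; the function name and
# shapes are illustrative, and the real model additionally multiplies the binary mask by
# the padding attention mask before turning it into an additive bias.
import torch


def build_skim_attention_bias(skim_scores: torch.Tensor, top_k: int) -> torch.Tensor:
    # skim_scores: (batch, heads, seq_len, seq_len) attention probabilities computed
    # from the 2D layout embeddings.
    topk_idx = torch.topk(skim_scores, top_k, dim=-1).indices
    skim_mask = torch.zeros_like(skim_scores)
    skim_mask = skim_mask.scatter(-1, topk_idx, 1.0)  # keep only the top-k keys per query
    return (1.0 - skim_mask) * -10000.0  # additive bias: blocked positions get -10000


# Usage sketch: kept positions receive a bias of 0, all others -10000.
scores = torch.softmax(torch.randn(2, 12, 16, 16), dim=-1)
bias = build_skim_attention_bias(scores, top_k=4)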
[ "总内存 memoryUsed = round(m.used / (1024.0 * 1024.0 * 1024.0),", "pymysql from fastapi import APIRouter from api.utils import response_code router", "内存信息 memoryTotal = round(m.total / (1024.0 * 1024.0 * 1024.0),", "import psutil import pymysql from fastapi import APIRouter from api.utils", "# cpu cpuCount = psutil.cpu_count(logical=False) # CPU核心 cpuPercent = psutil.cpu_percent(0.5)", "* 1024.0), 2) # 已用内存 memoryFree = round(memoryTotal - memoryUsed,", "cur.fetchall() db.close() return response_code.resp_200(data={\"res\": res}) def get_performance(): res = {}", "1024.0 * 1024.0)) except: pass res['cpu'] = cpuPercent res['mem'] =", "desc limit 10') res['recent_event'] = cur.fetchall() db.close() return response_code.resp_200(data={\"res\": res})", "res['image_count'] = len(g.dc.images.list()) res['networks_count'] = len(g.dc.networks.list()) cur = db.cursor(cursor=pymysql.cursors.DictCursor) cur.execute(f'select", "int(o.used / (1024.0 * 1024.0 * 1024.0)) diskFree += int(o.free", "import g res = {} db = g.db_pool.connection() cur =", "# 总储存空间大小 diskUsed = 0 # 已用 diskFree = 0", "- memoryUsed, 2) # 剩余内存 # 磁盘 io = psutil.disk_partitions()", "/ (1024.0 * 1024.0 * 1024.0)) diskFree += int(o.free /", "res['memoryTotal'] = memoryTotal res['memoryUsed'] = memoryUsed res['diskTotal'] = diskTotal res['diskUsed']", "response_code router = APIRouter() @router.get('/dashboard/getinfo') def getinfo(): from init_global import", "from init_global import g from main import socket_manager as sm", "= memoryUsed res['diskTotal'] = diskTotal res['diskUsed'] = diskUsed return res", "= memoryTotal res['memoryUsed'] = memoryUsed res['diskTotal'] = diskTotal res['diskUsed'] =", "(1024.0 * 1024.0 * 1024.0), 2) # 总内存 memoryUsed =", "round(memoryTotal - memoryUsed, 2) # 剩余内存 # 磁盘 io =", "import g from main import socket_manager as sm print(g.person_online) while", "psutil.disk_partitions() diskCount = len(io) diskTotal = 0 # 总储存空间大小 diskUsed", "# 已用 diskFree = 0 # 剩余 for i in", "app_list') res['app_count'] = cur.fetchall()[0][0] cur.execute(f'select count(app_name) from app_list where status=\"running\"')", "cpufree = round(100 - cpuPercent, 2) # CPU空余 # 内存", "memoryUsed = round(m.used / (1024.0 * 1024.0 * 1024.0), 2)", "* 1024.0)) except: pass res['cpu'] = cpuPercent res['mem'] = m.percent", "io: try: o = psutil.disk_usage(i.mountpoint) diskTotal += int(o.total / (1024.0", "psutil.cpu_percent(0.5) # 使用率 cpufree = round(100 - cpuPercent, 2) #", "= psutil.virtual_memory() # 内存信息 memoryTotal = round(m.total / (1024.0 *", "diskTotal = 0 # 总储存空间大小 diskUsed = 0 # 已用", "1024.0 * 1024.0), 2) # 已用内存 memoryFree = round(memoryTotal -", "res['mem'] = m.percent res['disk'] = o.percent res['memoryTotal'] = memoryTotal res['memoryUsed']", "0 # 剩余 for i in io: try: o =", "CPU空余 # 内存 m = psutil.virtual_memory() # 内存信息 memoryTotal =", "where status=\"running\"') res['app_run_count'] = cur.fetchall()[0][0] res['image_count'] = len(g.dc.images.list()) res['networks_count'] =", "cpu cpuCount = psutil.cpu_count(logical=False) # CPU核心 cpuPercent = psutil.cpu_percent(0.5) #", "已用内存 memoryFree = round(memoryTotal - memoryUsed, 2) # 剩余内存 #", "/ (1024.0 * 1024.0 * 1024.0), 2) # 总内存 memoryUsed", "try: o = psutil.disk_usage(i.mountpoint) diskTotal += int(o.total / (1024.0 *", "* from app_list order by start_time desc limit 10') res['recent_event']", "= psutil.disk_partitions() diskCount = len(io) diskTotal = 0 # 总储存空间大小", "def push_realinfo(): from init_global import g from main import socket_manager", "= {} db = g.db_pool.connection() cur = 
db.cursor() cur.execute(f'select count(app_name)", "@router.get('/dashboard/getinfo') def getinfo(): from init_global import g res = {}", "import pymysql from fastapi import APIRouter from api.utils import response_code", "- cpuPercent, 2) # CPU空余 # 内存 m = psutil.virtual_memory()", "# CPU核心 cpuPercent = psutil.cpu_percent(0.5) # 使用率 cpufree = round(100", "i in io: try: o = psutil.disk_usage(i.mountpoint) diskTotal += int(o.total", "while g.person_online: res = get_performance() # print(res) g.push_loop.run_until_complete(sm.emit('dashboard', {'data': res}))", "limit 10') res['recent_event'] = cur.fetchall() db.close() return response_code.resp_200(data={\"res\": res}) def", "round(m.total / (1024.0 * 1024.0 * 1024.0), 2) # 总内存", "psutil.cpu_count(logical=False) # CPU核心 cpuPercent = psutil.cpu_percent(0.5) # 使用率 cpufree =", "* 1024.0 * 1024.0)) except: pass res['cpu'] = cpuPercent res['mem']", "psutil.disk_usage(i.mountpoint) diskTotal += int(o.total / (1024.0 * 1024.0 * 1024.0))", "= diskUsed return res def push_realinfo(): from init_global import g", "# 已用内存 memoryFree = round(memoryTotal - memoryUsed, 2) # 剩余内存", "res['memoryUsed'] = memoryUsed res['diskTotal'] = diskTotal res['diskUsed'] = diskUsed return", "res['app_run_count'] = cur.fetchall()[0][0] res['image_count'] = len(g.dc.images.list()) res['networks_count'] = len(g.dc.networks.list()) cur", "diskUsed = 0 # 已用 diskFree = 0 # 剩余", "res['diskTotal'] = diskTotal res['diskUsed'] = diskUsed return res def push_realinfo():", "def getinfo(): from init_global import g res = {} db", "res['networks_count'] = len(g.dc.networks.list()) cur = db.cursor(cursor=pymysql.cursors.DictCursor) cur.execute(f'select * from app_list", "start_time desc limit 10') res['recent_event'] = cur.fetchall() db.close() return response_code.resp_200(data={\"res\":", "2) # CPU空余 # 内存 m = psutil.virtual_memory() # 内存信息", "2) # 剩余内存 # 磁盘 io = psutil.disk_partitions() diskCount =", "cpuPercent = psutil.cpu_percent(0.5) # 使用率 cpufree = round(100 - cpuPercent,", "1024.0)) except: pass res['cpu'] = cpuPercent res['mem'] = m.percent res['disk']", "/ (1024.0 * 1024.0 * 1024.0), 2) # 已用内存 memoryFree", "print(g.person_online) while g.person_online: res = get_performance() # print(res) g.push_loop.run_until_complete(sm.emit('dashboard', {'data':", "init_global import g from main import socket_manager as sm print(g.person_online)", "= db.cursor() cur.execute(f'select count(app_name) from app_list') res['app_count'] = cur.fetchall()[0][0] cur.execute(f'select", "2) # 已用内存 memoryFree = round(memoryTotal - memoryUsed, 2) #", "# 内存 m = psutil.virtual_memory() # 内存信息 memoryTotal = round(m.total", "= len(g.dc.networks.list()) cur = db.cursor(cursor=pymysql.cursors.DictCursor) cur.execute(f'select * from app_list order", "for i in io: try: o = psutil.disk_usage(i.mountpoint) diskTotal +=", "* 1024.0)) diskUsed += int(o.used / (1024.0 * 1024.0 *", "1024.0), 2) # 总内存 memoryUsed = round(m.used / (1024.0 *", "已用 diskFree = 0 # 剩余 for i in io:", "cpuPercent, 2) # CPU空余 # 内存 m = psutil.virtual_memory() #", "2) # 总内存 memoryUsed = round(m.used / (1024.0 * 1024.0", "fastapi import APIRouter from api.utils import response_code router = APIRouter()", "time import psutil import pymysql from fastapi import APIRouter from", "= o.percent res['memoryTotal'] = memoryTotal res['memoryUsed'] = memoryUsed res['diskTotal'] =", "g from main import socket_manager as sm print(g.person_online) while g.person_online:", "len(io) diskTotal = 0 # 总储存空间大小 diskUsed = 0 #", 
"db.cursor(cursor=pymysql.cursors.DictCursor) cur.execute(f'select * from app_list order by start_time desc limit", "10') res['recent_event'] = cur.fetchall() db.close() return response_code.resp_200(data={\"res\": res}) def get_performance():", "round(m.used / (1024.0 * 1024.0 * 1024.0), 2) # 已用内存", "cur.fetchall()[0][0] res['image_count'] = len(g.dc.images.list()) res['networks_count'] = len(g.dc.networks.list()) cur = db.cursor(cursor=pymysql.cursors.DictCursor)", "diskFree += int(o.free / (1024.0 * 1024.0 * 1024.0)) except:", "res['recent_event'] = cur.fetchall() db.close() return response_code.resp_200(data={\"res\": res}) def get_performance(): res", "diskFree = 0 # 剩余 for i in io: try:", "cur.execute(f'select count(app_name) from app_list') res['app_count'] = cur.fetchall()[0][0] cur.execute(f'select count(app_name) from", "res['cpu'] = cpuPercent res['mem'] = m.percent res['disk'] = o.percent res['memoryTotal']", "from init_global import g res = {} db = g.db_pool.connection()", "db.cursor() cur.execute(f'select count(app_name) from app_list') res['app_count'] = cur.fetchall()[0][0] cur.execute(f'select count(app_name)", "cur.execute(f'select * from app_list order by start_time desc limit 10')", "by start_time desc limit 10') res['recent_event'] = cur.fetchall() db.close() return", "+= int(o.total / (1024.0 * 1024.0 * 1024.0)) diskUsed +=", "get_performance(): res = {} # cpu cpuCount = psutil.cpu_count(logical=False) #", "0 # 已用 diskFree = 0 # 剩余 for i", "1024.0), 2) # 已用内存 memoryFree = round(memoryTotal - memoryUsed, 2)", "init_global import g res = {} db = g.db_pool.connection() cur", "= db.cursor(cursor=pymysql.cursors.DictCursor) cur.execute(f'select * from app_list order by start_time desc", "# 剩余内存 # 磁盘 io = psutil.disk_partitions() diskCount = len(io)", "res['diskUsed'] = diskUsed return res def push_realinfo(): from init_global import", "磁盘 io = psutil.disk_partitions() diskCount = len(io) diskTotal = 0", "= APIRouter() @router.get('/dashboard/getinfo') def getinfo(): from init_global import g res", "= cpuPercent res['mem'] = m.percent res['disk'] = o.percent res['memoryTotal'] =", "1024.0 * 1024.0)) diskUsed += int(o.used / (1024.0 * 1024.0", "m.percent res['disk'] = o.percent res['memoryTotal'] = memoryTotal res['memoryUsed'] = memoryUsed", "= 0 # 剩余 for i in io: try: o", "cur.fetchall()[0][0] cur.execute(f'select count(app_name) from app_list where status=\"running\"') res['app_run_count'] = cur.fetchall()[0][0]", "push_realinfo(): from init_global import g from main import socket_manager as", "db.close() return response_code.resp_200(data={\"res\": res}) def get_performance(): res = {} #", "psutil import pymysql from fastapi import APIRouter from api.utils import", "diskUsed return res def push_realinfo(): from init_global import g from", "cpuCount = psutil.cpu_count(logical=False) # CPU核心 cpuPercent = psutil.cpu_percent(0.5) # 使用率", "# 内存信息 memoryTotal = round(m.total / (1024.0 * 1024.0 *", "# CPU空余 # 内存 m = psutil.virtual_memory() # 内存信息 memoryTotal", "{} db = g.db_pool.connection() cur = db.cursor() cur.execute(f'select count(app_name) from", "from app_list') res['app_count'] = cur.fetchall()[0][0] cur.execute(f'select count(app_name) from app_list where", "{} # cpu cpuCount = psutil.cpu_count(logical=False) # CPU核心 cpuPercent =", "* 1024.0), 2) # 总内存 memoryUsed = round(m.used / (1024.0", "1024.0)) diskUsed += int(o.used / (1024.0 * 1024.0 * 1024.0))", "1024.0 * 1024.0)) diskFree += int(o.free / (1024.0 * 1024.0", "socket_manager as sm print(g.person_online) while 
g.person_online: res = get_performance() #", "= g.db_pool.connection() cur = db.cursor() cur.execute(f'select count(app_name) from app_list') res['app_count']", "count(app_name) from app_list') res['app_count'] = cur.fetchall()[0][0] cur.execute(f'select count(app_name) from app_list", "order by start_time desc limit 10') res['recent_event'] = cur.fetchall() db.close()", "diskTotal res['diskUsed'] = diskUsed return res def push_realinfo(): from init_global", "剩余内存 # 磁盘 io = psutil.disk_partitions() diskCount = len(io) diskTotal", "APIRouter from api.utils import response_code router = APIRouter() @router.get('/dashboard/getinfo') def", "db = g.db_pool.connection() cur = db.cursor() cur.execute(f'select count(app_name) from app_list')", "cur = db.cursor() cur.execute(f'select count(app_name) from app_list') res['app_count'] = cur.fetchall()[0][0]", "* 1024.0 * 1024.0), 2) # 总内存 memoryUsed = round(m.used", "总储存空间大小 diskUsed = 0 # 已用 diskFree = 0 #", "return res def push_realinfo(): from init_global import g from main", "res['disk'] = o.percent res['memoryTotal'] = memoryTotal res['memoryUsed'] = memoryUsed res['diskTotal']", "o = psutil.disk_usage(i.mountpoint) diskTotal += int(o.total / (1024.0 * 1024.0", "(1024.0 * 1024.0 * 1024.0)) diskFree += int(o.free / (1024.0", "memoryTotal = round(m.total / (1024.0 * 1024.0 * 1024.0), 2)", "= round(memoryTotal - memoryUsed, 2) # 剩余内存 # 磁盘 io", "* 1024.0 * 1024.0), 2) # 已用内存 memoryFree = round(memoryTotal", "res = {} db = g.db_pool.connection() cur = db.cursor() cur.execute(f'select", "import socket_manager as sm print(g.person_online) while g.person_online: res = get_performance()", "int(o.free / (1024.0 * 1024.0 * 1024.0)) except: pass res['cpu']", "使用率 cpufree = round(100 - cpuPercent, 2) # CPU空余 #", "= psutil.cpu_percent(0.5) # 使用率 cpufree = round(100 - cpuPercent, 2)", "sm print(g.person_online) while g.person_online: res = get_performance() # print(res) g.push_loop.run_until_complete(sm.emit('dashboard',", "import APIRouter from api.utils import response_code router = APIRouter() @router.get('/dashboard/getinfo')", "diskTotal += int(o.total / (1024.0 * 1024.0 * 1024.0)) diskUsed", "app_list order by start_time desc limit 10') res['recent_event'] = cur.fetchall()", "= psutil.cpu_count(logical=False) # CPU核心 cpuPercent = psutil.cpu_percent(0.5) # 使用率 cpufree", "/ (1024.0 * 1024.0 * 1024.0)) diskUsed += int(o.used /", "main import socket_manager as sm print(g.person_online) while g.person_online: res =", "status=\"running\"') res['app_run_count'] = cur.fetchall()[0][0] res['image_count'] = len(g.dc.images.list()) res['networks_count'] = len(g.dc.networks.list())", "except: pass res['cpu'] = cpuPercent res['mem'] = m.percent res['disk'] =", "def get_performance(): res = {} # cpu cpuCount = psutil.cpu_count(logical=False)", "= diskTotal res['diskUsed'] = diskUsed return res def push_realinfo(): from", "(1024.0 * 1024.0 * 1024.0)) except: pass res['cpu'] = cpuPercent", "from fastapi import APIRouter from api.utils import response_code router =", "1024.0)) diskFree += int(o.free / (1024.0 * 1024.0 * 1024.0))", "o.percent res['memoryTotal'] = memoryTotal res['memoryUsed'] = memoryUsed res['diskTotal'] = diskTotal", "import time import psutil import pymysql from fastapi import APIRouter", "memoryFree = round(memoryTotal - memoryUsed, 2) # 剩余内存 # 磁盘", "app_list where status=\"running\"') res['app_run_count'] = cur.fetchall()[0][0] res['image_count'] = len(g.dc.images.list()) res['networks_count']", "* 1024.0)) diskFree += int(o.free / (1024.0 * 1024.0 
*", "count(app_name) from app_list where status=\"running\"') res['app_run_count'] = cur.fetchall()[0][0] res['image_count'] =", "cpuPercent res['mem'] = m.percent res['disk'] = o.percent res['memoryTotal'] = memoryTotal", "+= int(o.free / (1024.0 * 1024.0 * 1024.0)) except: pass", "round(100 - cpuPercent, 2) # CPU空余 # 内存 m =", "= psutil.disk_usage(i.mountpoint) diskTotal += int(o.total / (1024.0 * 1024.0 *", "cur.execute(f'select count(app_name) from app_list where status=\"running\"') res['app_run_count'] = cur.fetchall()[0][0] res['image_count']", "+= int(o.used / (1024.0 * 1024.0 * 1024.0)) diskFree +=", "CPU核心 cpuPercent = psutil.cpu_percent(0.5) # 使用率 cpufree = round(100 -", "psutil.virtual_memory() # 内存信息 memoryTotal = round(m.total / (1024.0 * 1024.0", "memoryUsed, 2) # 剩余内存 # 磁盘 io = psutil.disk_partitions() diskCount", "= {} # cpu cpuCount = psutil.cpu_count(logical=False) # CPU核心 cpuPercent", "diskUsed += int(o.used / (1024.0 * 1024.0 * 1024.0)) diskFree", "from api.utils import response_code router = APIRouter() @router.get('/dashboard/getinfo') def getinfo():", "getinfo(): from init_global import g res = {} db =", "/ (1024.0 * 1024.0 * 1024.0)) except: pass res['cpu'] =", "= m.percent res['disk'] = o.percent res['memoryTotal'] = memoryTotal res['memoryUsed'] =", "g res = {} db = g.db_pool.connection() cur = db.cursor()", "(1024.0 * 1024.0 * 1024.0), 2) # 已用内存 memoryFree =", "pass res['cpu'] = cpuPercent res['mem'] = m.percent res['disk'] = o.percent", "diskCount = len(io) diskTotal = 0 # 总储存空间大小 diskUsed =", "g.person_online: res = get_performance() # print(res) g.push_loop.run_until_complete(sm.emit('dashboard', {'data': res})) time.sleep(3)", "* 1024.0 * 1024.0)) diskFree += int(o.free / (1024.0 *", "in io: try: o = psutil.disk_usage(i.mountpoint) diskTotal += int(o.total /", "as sm print(g.person_online) while g.person_online: res = get_performance() # print(res)", "len(g.dc.images.list()) res['networks_count'] = len(g.dc.networks.list()) cur = db.cursor(cursor=pymysql.cursors.DictCursor) cur.execute(f'select * from", "memoryTotal res['memoryUsed'] = memoryUsed res['diskTotal'] = diskTotal res['diskUsed'] = diskUsed", "= cur.fetchall()[0][0] res['image_count'] = len(g.dc.images.list()) res['networks_count'] = len(g.dc.networks.list()) cur =", "= cur.fetchall()[0][0] cur.execute(f'select count(app_name) from app_list where status=\"running\"') res['app_run_count'] =", "= len(g.dc.images.list()) res['networks_count'] = len(g.dc.networks.list()) cur = db.cursor(cursor=pymysql.cursors.DictCursor) cur.execute(f'select *", "response_code.resp_200(data={\"res\": res}) def get_performance(): res = {} # cpu cpuCount", "# 总内存 memoryUsed = round(m.used / (1024.0 * 1024.0 *", "m = psutil.virtual_memory() # 内存信息 memoryTotal = round(m.total / (1024.0", "= round(m.total / (1024.0 * 1024.0 * 1024.0), 2) #", "* 1024.0 * 1024.0)) diskUsed += int(o.used / (1024.0 *", "import response_code router = APIRouter() @router.get('/dashboard/getinfo') def getinfo(): from init_global", "剩余 for i in io: try: o = psutil.disk_usage(i.mountpoint) diskTotal", "1024.0 * 1024.0), 2) # 总内存 memoryUsed = round(m.used /", "api.utils import response_code router = APIRouter() @router.get('/dashboard/getinfo') def getinfo(): from", "cur = db.cursor(cursor=pymysql.cursors.DictCursor) cur.execute(f'select * from app_list order by start_time", "res = {} # cpu cpuCount = psutil.cpu_count(logical=False) # CPU核心", "= round(m.used / (1024.0 * 1024.0 * 1024.0), 2) #", "io = psutil.disk_partitions() diskCount = len(io) 
diskTotal = 0 #", "g.db_pool.connection() cur = db.cursor() cur.execute(f'select count(app_name) from app_list') res['app_count'] =", "# 使用率 cpufree = round(100 - cpuPercent, 2) # CPU空余", "from app_list where status=\"running\"') res['app_run_count'] = cur.fetchall()[0][0] res['image_count'] = len(g.dc.images.list())", "= cur.fetchall() db.close() return response_code.resp_200(data={\"res\": res}) def get_performance(): res =", "return response_code.resp_200(data={\"res\": res}) def get_performance(): res = {} # cpu", "res def push_realinfo(): from init_global import g from main import", "from main import socket_manager as sm print(g.person_online) while g.person_online: res", "= 0 # 总储存空间大小 diskUsed = 0 # 已用 diskFree", "(1024.0 * 1024.0 * 1024.0)) diskUsed += int(o.used / (1024.0", "0 # 总储存空间大小 diskUsed = 0 # 已用 diskFree =", "= 0 # 已用 diskFree = 0 # 剩余 for", "from app_list order by start_time desc limit 10') res['recent_event'] =", "内存 m = psutil.virtual_memory() # 内存信息 memoryTotal = round(m.total /", "= len(io) diskTotal = 0 # 总储存空间大小 diskUsed = 0", "# 剩余 for i in io: try: o = psutil.disk_usage(i.mountpoint)", "router = APIRouter() @router.get('/dashboard/getinfo') def getinfo(): from init_global import g", "int(o.total / (1024.0 * 1024.0 * 1024.0)) diskUsed += int(o.used", "# 磁盘 io = psutil.disk_partitions() diskCount = len(io) diskTotal =", "res['app_count'] = cur.fetchall()[0][0] cur.execute(f'select count(app_name) from app_list where status=\"running\"') res['app_run_count']", "memoryUsed res['diskTotal'] = diskTotal res['diskUsed'] = diskUsed return res def", "res}) def get_performance(): res = {} # cpu cpuCount =", "= round(100 - cpuPercent, 2) # CPU空余 # 内存 m", "APIRouter() @router.get('/dashboard/getinfo') def getinfo(): from init_global import g res =", "len(g.dc.networks.list()) cur = db.cursor(cursor=pymysql.cursors.DictCursor) cur.execute(f'select * from app_list order by" ]
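The router and the push loop still have to be wired into the application somewhere. A minimal sketch, assuming the module above is importable as `dashboard` and the usual FastAPI layout (the module path and app object below are hypothetical, not part of the project):

import threading

from fastapi import FastAPI

from dashboard import router, push_realinfo  # hypothetical module path

app = FastAPI()
app.include_router(router)  # exposes GET /dashboard/getinfo

# push_realinfo() blocks for as long as g.person_online is truthy, so it must
# run off the request-handling path; a daemon thread will not block shutdown.
threading.Thread(target=push_realinfo, daemon=True).start()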
[ "self.crit = nn.MSELoss() def step(self): self.optimizer.zero_grad() glb = self.forward(self.rotations, self.position,", "!= 3: raise Exception('Unexpected shape of rotation') if quater and", "torch.empty(rotation.shape[:-1] + (3,), device=position.device) norm = torch.norm(rotation, dim=-1, keepdim=True) rotation", "parents, constrains): self.rotations = rotations self.rotations.requires_grad_(True) self.position = positions self.position.requires_grad_(True)", "def forward(self, rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False,", "shape of rotation') if quater and rotation.shape[-2] != 4: raise", "if axis == 'x': transform[..., 1, 1] = transform[..., 2,", "args, edges): self.topology = [-1] * (len(edges) + 1) self.rotation_map", "pi in enumerate(self.topology): if pi == -1: assert i ==", "0, :] = position for i, pi in enumerate(self.parents): if", "pi, :] return res @staticmethod def transform_from_euler(rotation, order): rotation =", "* Time offset should have shape batch_size * Joint_num *", "i, :] = torch.matmul(transform[..., i, :, :], offset[..., i, :,", "output have shape batch_size * Time * Joint_num * 3", "cos transform[..., 0, 2] = sin transform[..., 2, 0] =", "= quater[..., 0] qx = quater[..., 1] qy = quater[...,", "x2 yy = qy * y2 wx = qw *", "= 1.0 - (xx + yy) return m class InverseKinematics:", "= sin transform[..., 2, 0] = -sin if axis ==", "= qw * z2 m = torch.empty(quater.shape[:-1] + (3, 3),", "''' if not quater and rotation.shape[-2] != 3: raise Exception('Unexpected", "= qx * y2 yz = qy * z2 wy", "shape of rotation') rotation = rotation.permute(0, 3, 1, 2) position", "cos transform[..., 1, 2] = -sin transform[..., 2, 1] =", "torch import torch.nn as nn import numpy as np import", "transform = torch.empty(euler.shape[0:3] + (3, 3), device=euler.device) cos = torch.cos(euler)", "transform[..., 2, 1] = sin if axis == 'y': transform[...,", "result[..., pi, :] return result @staticmethod def transform_from_euler(rotation, order): rotation", "= sin return transform @staticmethod def transform_from_quaternion(quater: torch.Tensor): qw =", "* Time * Joint_num * 3 ''' def forward(self, rotation:", ":]) def all_loss(self): res = [self.tloss(t).detach().numpy() for t in range(self.constrains.shape[0])]", "Time offset should have shape batch_size * Joint_num * 3", "dtype=torch.float, device=raw.device) identity = identity.reshape((1, 1, -1, 1)) new_shape =", ":, :], transform[..., i, :, :]) if world: result[..., i,", "i, pi in enumerate(self.parents): if pi == -1: assert i", "transform[..., 2, 2] = cos transform[..., 0, 2] = sin", "= offset self.constrains = constrains self.optimizer = torch.optim.Adam([self.position, self.rotations], lr=1e-3,", "1] = cos transform[..., 0, 1] = -sin transform[..., 1,", "rotation / 180 * math.pi transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]),", "0, 0] = transform[..., 2, 2] = cos transform[..., 0,", "dtype=torch.float, device=raw.device) else: rotation = rotation.reshape((rotation.shape[0], -1, 3, rotation.shape[-1])) identity", "== -1: assert i == 0 continue transform[..., i, :,", "(xx + yy) return m class InverseKinematics: def __init__(self, rotations:", "transform[..., 0, 0] = transform[..., 2, 2] = cos transform[...,", "time): return self.crit(self.glb[time, :], self.constrains[time, :]) def all_loss(self): res =", ":, :]).squeeze() transform[..., i, :, :] = torch.matmul(transform[..., pi, :,", "2, 0] = -sin if axis == 'z': transform[..., 0,", 
"position.permute(0, 2, 1) result = torch.empty(rotation.shape[:-1] + (3, ), device=position.device)", "transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]), ForwardKinematics.transform_from_axis(rotation[..., 2], order[2])) transform =", "axis): transform = torch.empty(euler.shape[0:3] + (3, 3), device=euler.device) cos =", "enumerate(self.topology): if pi == 0 or pi == -1: continue", "qy * z2 wy = qw * y2 xz =", "3, 1, 2) position = position.permute(0, 2, 1) ''' result", "i, :, :]) result[..., i, :] = torch.matmul(transform[..., i, :,", "0, 0), dtype=torch.float, device=raw.device) else: rotation = rotation.reshape((rotation.shape[0], -1, 3,", "self.quater if self.pos_repr == '3d': position = raw[:, -3:, :]", "0] = sin return transform @staticmethod def transform_from_quaternion(quater: torch.Tensor): qw", "yy = qy * y2 wx = qw * x2", "self.constrains) loss.backward() self.optimizer.step() self.glb = glb return loss.item() def tloss(self,", "res = [self.tloss(t).detach().numpy() for t in range(self.constrains.shape[0])] return np.array(res) '''", "norm if quater: transform = self.transform_from_quaternion(rotation) else: transform = self.transform_from_euler(rotation,", "sin if axis == 'y': transform[..., 0, 0] = transform[...,", "if quater and rotation.shape[-2] != 4: raise Exception('Unexpected shape of", "- wz m[..., 0, 2] = xz + wy m[...,", "= torch.matmul(transform[..., pi, :, :], offset[..., i, :, :]).squeeze() transform[...,", "* x2 yy = qy * y2 wx = qw", "= torch.sin(euler) cord = ord(axis) - ord('x') transform[..., cord, :]", "continue result[..., i, :] = torch.matmul(transform[..., pi, :, :], offset[...,", "support') if quater: rotation = rotation.reshape((rotation.shape[0], -1, 4, rotation.shape[-1])) identity", "qx y2 = qy + qy z2 = qz +", "__init__(self, args, edges): self.topology = [-1] * (len(edges) + 1)", "cos transform[..., 0, 1] = -sin transform[..., 1, 0] =", "torch.Tensor, positions: torch.Tensor, offset, parents, constrains): self.rotations = rotations self.rotations.requires_grad_(True)", "transform = self.transform_from_quaternion(rotation) else: transform = self.transform_from_euler(rotation, order) offset =", "class ForwardKinematics: def __init__(self, args, edges): self.topology = [-1] *", "= ord(axis) - ord('x') transform[..., cord, :] = transform[..., :,", "= self.transform_from_quaternion(rotation) else: transform = self.transform_from_euler(rotation, order) offset = offset.reshape((-1,", "(3,), device=position.device) norm = torch.norm(rotation, dim=-1, keepdim=True) rotation = rotation", "= torch.empty(quater.shape[:-1] + (3, 3), device=quater.device) m[..., 0, 0] =", "= identity.repeat(new_shape) for i, j in enumerate(self.rotation_map): rotation_final[:, j, :,", "qz xx = qx * x2 yy = qy *", "0] qx = quater[..., 1] qy = quater[..., 2] qz", "transform_from_axis(euler, axis): transform = torch.empty(euler.shape[0:3] + (3, 3), device=euler.device) cos", ":] = torch.matmul(transform[..., pi, :, :], transform[..., i, :, :])", "transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform) return transform @staticmethod def", "-sin if axis == 'z': transform[..., 0, 0] = transform[...,", "identity = torch.tensor((1, 0, 0, 0), dtype=torch.float, device=raw.device) else: rotation", "offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1)) result[..., 0, :] = position", "m[..., 1, 0] = xy + wz m[..., 1, 1]", "offset[..., i, :, :]).squeeze() transform[..., i, :, :] = 
torch.matmul(transform[...,", "as np import math class ForwardKinematics: def __init__(self, args, edges):", "== '3d': position = raw[:, -3:, :] rotation = raw[:,", "if quater is None: quater = self.quater if self.pos_repr ==", "constrains): self.rotations = rotations self.rotations.requires_grad_(True) self.position = positions self.position.requires_grad_(True) self.parents", "None: quater = self.quater if self.pos_repr == '3d': position =", "self.transform_from_euler(rotation, order) offset = offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1)) result[...,", "edge in enumerate(edges): self.topology[edge[1]] = edge[0] self.rotation_map.append(edge[1]) self.world = args.fk_world", "'3d': position = raw[:, -3:, :] rotation = raw[:, :-3,", "2], order[2])) transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform) return transform", "(len(edges) + 1) self.rotation_map = [] for i, edge in", "4, rotation.shape[-1])) identity = torch.tensor((1, 0, 0, 0), dtype=torch.float, device=raw.device)", "self.optimizer.zero_grad() glb = self.forward(self.rotations, self.position, self.offset, order='', quater=True, world=True) loss", ":] = torch.matmul(transform[..., i, :, :], offset[..., i, :, :]).squeeze()", "import numpy as np import math class ForwardKinematics: def __init__(self,", "transform @staticmethod def transform_from_axis(euler, axis): transform = torch.empty(euler.shape[0:3] + (3,", "1 if axis == 'x': transform[..., 1, 1] = transform[...,", "have shape batch_size * Time * Joint_num * 3 '''", "device=quater.device) m[..., 0, 0] = 1.0 - (yy + zz)", "1, 2] = -sin transform[..., 2, 1] = sin if", "* x2 xy = qx * y2 yz = qy", "torch.Tensor, offset, parents, constrains): self.rotations = rotations self.rotations.requires_grad_(True) self.position =", "i == 0 continue result[..., i, :] = torch.matmul(transform[..., pi,", "i, :, :]).squeeze() if world: result[..., i, :] += result[...,", "Time * Joint_num * 3 ''' def forward(self, rotation: torch.Tensor,", "loss.backward() self.optimizer.step() self.glb = glb return loss.item() def tloss(self, time):", "self.rotation_map.append(edge[1]) self.world = args.fk_world self.pos_repr = args.pos_repr self.quater = args.rotation", "@staticmethod def transform_from_axis(euler, axis): transform = torch.empty(euler.shape[0:3] + (3, 3),", "+ yy) return m class InverseKinematics: def __init__(self, rotations: torch.Tensor,", "[self.tloss(t).detach().numpy() for t in range(self.constrains.shape[0])] return np.array(res) ''' rotation should", "for i, pi in enumerate(self.parents): if pi == -1: assert", "raise Exception('Unexpected shape of rotation') if quater and rotation.shape[-2] !=", "= quater[..., 1] qy = quater[..., 2] qz = quater[...,", "= edge[0] self.rotation_map.append(edge[1]) self.world = args.fk_world self.pos_repr = args.pos_repr self.quater", ":] = rotation[:, i, :, :] return self.forward(rotation_final, position, offset,", "i, edge in enumerate(edges): self.topology[edge[1]] = edge[0] self.rotation_map.append(edge[1]) self.world =", "rotation.permute(0, 3, 1, 2) position = position.permute(0, 2, 1) result", "3, 1, 2) position = position.permute(0, 2, 1) result =", "identity = torch.zeros((3, ), dtype=torch.float, device=raw.device) identity = identity.reshape((1, 1,", "= 0 transform[..., cord, cord] = 1 if axis ==", "= torch.zeros((3, ), dtype=torch.float, device=raw.device) identity = identity.reshape((1, 1, -1,", "wx m[..., 2, 0] = xz - wy m[..., 2,", "self.forward(rotation_final, position, 
offset, world=world, quater=quater) ''' rotation should have shape", "1] qy = quater[..., 2] qz = quater[..., 3] x2", "self.rotations = rotations self.rotations.requires_grad_(True) self.position = positions self.position.requires_grad_(True) self.parents =", "rotations: torch.Tensor, positions: torch.Tensor, offset, parents, constrains): self.rotations = rotations", "offset: torch.Tensor, order='xyz', quater=False, world=True): if not quater and rotation.shape[-2]", "nn import numpy as np import math class ForwardKinematics: def", "= transform[..., 2, 2] = cos transform[..., 0, 2] =", "= args.pos_repr self.quater = args.rotation == 'quaternion' def forward_from_raw(self, raw,", "'y': transform[..., 0, 0] = transform[..., 2, 2] = cos", "* 3 output have shape batch_size * Time * Joint_num", "if pi == -1: assert i == 0 continue transform[...,", "zz = qz * z2 wz = qw * z2", "keepdim=True) rotation = rotation / norm if quater: transform =", "/ norm if quater: transform = self.transform_from_quaternion(rotation) else: transform =", "quater is None: quater = self.quater if self.pos_repr == '3d':", "3), device=quater.device) m[..., 0, 0] = 1.0 - (yy +", "qx = quater[..., 1] qy = quater[..., 2] qz =", "pi == -1: assert i == 0 continue result[..., i,", "for i, edge in enumerate(edges): self.topology[edge[1]] = edge[0] self.rotation_map.append(edge[1]) self.world", "constrains self.optimizer = torch.optim.Adam([self.position, self.rotations], lr=1e-3, betas=(0.9, 0.999)) self.crit =", "torch.matmul(transform[..., pi, :, :], transform[..., i, :, :]) if world:", ":] += res[..., pi, :] return res @staticmethod def transform_from_euler(rotation,", "self.offset, order='', quater=True, world=True) loss = self.crit(glb, self.constrains) loss.backward() self.optimizer.step()", ":] return result @staticmethod def transform_from_euler(rotation, order): rotation = rotation", "result def from_local_to_world(self, res: torch.Tensor): res = res.clone() for i,", "m[..., 1, 2] = yz - wx m[..., 2, 0]", "of rotation') if quater and rotation.shape[-2] != 4: raise Exception('Unexpected", "Joint_num * 3 ''' def forward(self, rotation: torch.Tensor, position: torch.Tensor,", "), device=position.device) norm = torch.norm(rotation, dim=-1, keepdim=True) #norm[norm < 1e-10]", "* (len(edges) + 1) self.rotation_map = [] for i, edge", "qw * x2 xy = qx * y2 yz =", "2, 1] = yz + wx m[..., 2, 2] =", "= rotation / 180 * math.pi transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1],", "yz + wx m[..., 2, 2] = 1.0 - (xx", "torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False, world=True): ''' if", "edge[0] self.rotation_map.append(edge[1]) self.world = args.fk_world self.pos_repr = args.pos_repr self.quater =", "None: world = self.world if quater is None: quater =", "1) result = torch.empty(rotation.shape[:-1] + (3, ), device=position.device) norm =", "+ zz) m[..., 1, 2] = yz - wx m[...,", "and rotation.shape[-2] != 3: raise Exception('Unexpected shape of rotation') if", "loss.item() def tloss(self, time): return self.crit(self.glb[time, :], self.constrains[time, :]) def", "i, :, :], offset[..., i, :, :]).squeeze() if world: result[...,", "return result @staticmethod def transform_from_euler(rotation, order): rotation = rotation /", "0] = -sin if axis == 'z': transform[..., 0, 0]", "= torch.matmul(transform[..., i, :, :], offset[..., i, :, :]).squeeze() if", "= self.world if quater is None: quater = self.quater if", ":, :], transform[..., i, :, :]) result[..., i, :] =", "def 
transform_from_euler(rotation, order): rotation = rotation / 180 * math.pi", "m = torch.empty(quater.shape[:-1] + (3, 3), device=quater.device) m[..., 0, 0]", "1, 0] = xy + wz m[..., 1, 1] =", ":]).squeeze() if world: result[..., i, :] += result[..., pi, :]", "m[..., 0, 1] = xy - wz m[..., 0, 2]", "self.optimizer = torch.optim.Adam([self.position, self.rotations], lr=1e-3, betas=(0.9, 0.999)) self.crit = nn.MSELoss()", "1, 2) position = position.permute(0, 2, 1) ''' result =", "position = position.permute(0, 2, 1) ''' result = torch.empty(rotation.shape[:-1] +", "enumerate(self.parents): if pi == -1: assert i == 0 continue", "3 * Time offset should have shape batch_size * Joint_num", "= rotation.reshape((rotation.shape[0], -1, 4, rotation.shape[-1])) identity = torch.tensor((1, 0, 0,", "j, :, :] = rotation[:, i, :, :] return self.forward(rotation_final,", "transform) return transform @staticmethod def transform_from_axis(euler, axis): transform = torch.empty(euler.shape[0:3]", "torch.sin(euler) cord = ord(axis) - ord('x') transform[..., cord, :] =", "= cos transform[..., 1, 2] = -sin transform[..., 2, 1]", "= xy - wz m[..., 0, 2] = xz +", ":] += result[..., pi, :] return result @staticmethod def transform_from_euler(rotation,", "(3, 3), device=quater.device) m[..., 0, 0] = 1.0 - (yy", "+ (3, 3), device=quater.device) m[..., 0, 0] = 1.0 -", "position = raw[:, -3:, :] rotation = raw[:, :-3, :]", "0], order[0]), transform) return transform @staticmethod def transform_from_axis(euler, axis): transform", "order): rotation = rotation / 180 * math.pi transform =", "0] = transform[..., 2, 2] = cos transform[..., 0, 2]", "return m class InverseKinematics: def __init__(self, rotations: torch.Tensor, positions: torch.Tensor,", "pi in enumerate(self.parents): if pi == -1: assert i ==", "Exception('Not support') if quater: rotation = rotation.reshape((rotation.shape[0], -1, 4, rotation.shape[-1]))", "2] = xz + wy m[..., 1, 0] = xy", "+ qz xx = qx * x2 yy = qy", "0, :] = position for i, pi in enumerate(self.topology): if", "list(rotation.shape) new_shape[1] += 1 new_shape[2] = 1 rotation_final = identity.repeat(new_shape)", "position for i, pi in enumerate(self.parents): if pi == -1:", "y2 xz = qx * z2 zz = qz *", "-1: assert i == 0 continue result[..., i, :] =", "(3/4) * Time position should have shape batch_size * 3", "pi == -1: continue res[..., i, :] += res[..., pi,", "ForwardKinematics: def __init__(self, args, edges): self.topology = [-1] * (len(edges)", "return transform @staticmethod def transform_from_quaternion(quater: torch.Tensor): qw = quater[..., 0]", "result[..., 0, :] = position for i, pi in enumerate(self.parents):", "* z2 zz = qz * z2 wz = qw", "= self.forward(self.rotations, self.position, self.offset, order='', quater=True, world=True) loss = self.crit(glb,", "= 1.0 - (yy + zz) m[..., 0, 1] =", "= qz * z2 wz = qw * z2 m", "torch.tensor((1, 0, 0, 0), dtype=torch.float, device=raw.device) else: rotation = rotation.reshape((rotation.shape[0],", "torch.Tensor): res = res.clone() for i, pi in enumerate(self.topology): if", "is None: quater = self.quater if self.pos_repr == '3d': position", "should have shape batch_size * 3 * Time offset should", "xz - wy m[..., 2, 1] = yz + wx", "-1: assert i == 0 continue transform[..., i, :, :]", "xz + wy m[..., 1, 0] = xy + wz", "world=world, quater=quater) ''' rotation should have shape batch_size * Joint_num", "qx * y2 yz = qy * z2 wy =", "= qx * z2 zz = qz * z2 wz", "for i, pi in enumerate(self.topology): if pi == 0 or", "in enumerate(edges): 
self.topology[edge[1]] = edge[0] self.rotation_map.append(edge[1]) self.world = args.fk_world self.pos_repr", "1, 0] = sin return transform @staticmethod def transform_from_quaternion(quater: torch.Tensor):", "def __init__(self, args, edges): self.topology = [-1] * (len(edges) +", "identity = identity.reshape((1, 1, -1, 1)) new_shape = list(rotation.shape) new_shape[1]", "rotation.shape[-2] != 3: raise Exception('Unexpected shape of rotation') if quater", "quater and rotation.shape[-2] != 3: raise Exception('Unexpected shape of rotation')", "= qy + qy z2 = qz + qz xx", "xy = qx * y2 yz = qy * z2", "torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False, world=True): if not", "def from_local_to_world(self, res: torch.Tensor): res = res.clone() for i, pi", "- (yy + zz) m[..., 0, 1] = xy -", "self.position = positions self.position.requires_grad_(True) self.parents = parents self.offset = offset", ":] rotation = raw[:, :-3, :] elif self.pos_repr == '4d':", "0 continue transform[..., i, :, :] = torch.matmul(transform[..., pi, :,", "1] = transform[..., 2, 2] = cos transform[..., 1, 2]", "y2 wx = qw * x2 xy = qx *", "rotation.shape[-2] != 4: raise Exception('Unexpected shape of rotation') rotation =", "norm = torch.norm(rotation, dim=-1, keepdim=True) #norm[norm < 1e-10] = 1", ":] elif self.pos_repr == '4d': raise Exception('Not support') if quater:", "self.parents = parents self.offset = offset self.constrains = constrains self.optimizer", "res[..., pi, :] return res @staticmethod def transform_from_euler(rotation, order): rotation", "position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False, world=True): ''' if not", "world is None: world = self.world if quater is None:", "= position for i, pi in enumerate(self.topology): if pi ==", "wy = qw * y2 xz = qx * z2", "-1, 4, rotation.shape[-1])) identity = torch.tensor((1, 0, 0, 0), dtype=torch.float,", "else: transform = self.transform_from_euler(rotation, order) offset = offset.reshape((-1, 1, offset.shape[-2],", "= rotations self.rotations.requires_grad_(True) self.position = positions self.position.requires_grad_(True) self.parents = parents", "= transform[..., 2, 2] = cos transform[..., 1, 2] =", "device=position.device) norm = torch.norm(rotation, dim=-1, keepdim=True) #norm[norm < 1e-10] =", "have shape batch_size * Joint_num * (3/4) * Time position", "0, 2] = sin transform[..., 2, 0] = -sin if", "yy) return m class InverseKinematics: def __init__(self, rotations: torch.Tensor, positions:", "rotation = rotation.reshape((rotation.shape[0], -1, 3, rotation.shape[-1])) identity = torch.zeros((3, ),", "1] = xy - wz m[..., 0, 2] = xz", "= sin if axis == 'y': transform[..., 0, 0] =", "rotation.shape[-1])) identity = torch.zeros((3, ), dtype=torch.float, device=raw.device) identity = identity.reshape((1,", ":] return res @staticmethod def transform_from_euler(rotation, order): rotation = rotation", "if quater: rotation = rotation.reshape((rotation.shape[0], -1, 4, rotation.shape[-1])) identity =", "transform[..., 0, 1] = -sin transform[..., 1, 0] = sin", "transform[..., 1, 1] = cos transform[..., 0, 1] = -sin", "= quater[..., 3] x2 = qx + qx y2 =", "self.world = args.fk_world self.pos_repr = args.pos_repr self.quater = args.rotation ==", "= args.fk_world self.pos_repr = args.pos_repr self.quater = args.rotation == 'quaternion'", "lr=1e-3, betas=(0.9, 0.999)) self.crit = nn.MSELoss() def step(self): self.optimizer.zero_grad() glb", "i, j in enumerate(self.rotation_map): rotation_final[:, j, :, :] = 
rotation[:,", ":, :] = torch.matmul(transform[..., pi, :, :], transform[..., i, :,", "return res @staticmethod def transform_from_euler(rotation, order): rotation = rotation /", "self.optimizer.step() self.glb = glb return loss.item() def tloss(self, time): return", "for i, pi in enumerate(self.topology): if pi == -1: assert", "1, offset.shape[-2], offset.shape[-1], 1)) result[..., 0, :] = position for", "__init__(self, rotations: torch.Tensor, positions: torch.Tensor, offset, parents, constrains): self.rotations =", "= 1.0 - (xx + zz) m[..., 1, 2] =", "1, 1] = cos transform[..., 0, 1] = -sin transform[...,", "if pi == 0 or pi == -1: continue res[...,", "raw[:, -3:, :] rotation = raw[:, :-3, :] elif self.pos_repr", "ord(axis) - ord('x') transform[..., cord, :] = transform[..., :, cord]", "def step(self): self.optimizer.zero_grad() glb = self.forward(self.rotations, self.position, self.offset, order='', quater=True,", "= position.permute(0, 2, 1) ''' result = torch.empty(rotation.shape[:-1] + (3,),", ":], self.constrains[time, :]) def all_loss(self): res = [self.tloss(t).detach().numpy() for t", "i, :] += res[..., pi, :] return res @staticmethod def", "wz m[..., 0, 2] = xz + wy m[..., 1,", "self.rotations.requires_grad_(True) self.position = positions self.position.requires_grad_(True) self.parents = parents self.offset =", "quater = self.quater if self.pos_repr == '3d': position = raw[:,", "= qx * x2 yy = qy * y2 wx", "x2 xy = qx * y2 yz = qy *", "i, :, :]) if world: result[..., i, :] += result[...,", "args.fk_world self.pos_repr = args.pos_repr self.quater = args.rotation == 'quaternion' def", "self.transform_from_quaternion(rotation) else: transform = self.transform_from_euler(rotation, order) offset = offset.reshape((-1, 1,", "in enumerate(self.topology): if pi == -1: assert i == 0", "qy z2 = qz + qz xx = qx *", "offset[..., i, :, :]).squeeze() if world: result[..., i, :] +=", "[] for i, edge in enumerate(edges): self.topology[edge[1]] = edge[0] self.rotation_map.append(edge[1])", "raw[:, :-3, :] elif self.pos_repr == '4d': raise Exception('Not support')", "+= result[..., pi, :] return result def from_local_to_world(self, res: torch.Tensor):", "x2 = qx + qx y2 = qy + qy", ":] return result def from_local_to_world(self, res: torch.Tensor): res = res.clone()", "pi in enumerate(self.topology): if pi == 0 or pi ==", ":, :] return self.forward(rotation_final, position, offset, world=world, quater=quater) ''' rotation", "''' rotation should have shape batch_size * Joint_num * (3/4)", "position = position.permute(0, 2, 1) result = torch.empty(rotation.shape[:-1] + (3,", "self.crit(self.glb[time, :], self.constrains[time, :]) def all_loss(self): res = [self.tloss(t).detach().numpy() for", "xz = qx * z2 zz = qz * z2", "* y2 xz = qx * z2 zz = qz", "= identity.reshape((1, 1, -1, 1)) new_shape = list(rotation.shape) new_shape[1] +=", "= offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1)) result[..., 0, :] =", "180 * math.pi transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]), ForwardKinematics.transform_from_axis(rotation[..., 2],", "position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False, world=True): if not quater", "qx + qx y2 = qy + qy z2 =", "- wx m[..., 2, 0] = xz - wy m[...,", "i == 0 continue transform[..., i, :, :] = torch.matmul(transform[...,", "if self.pos_repr == '3d': position = raw[:, -3:, :] rotation", "3), device=euler.device) cos = torch.cos(euler) sin = torch.sin(euler) cord =", "in enumerate(self.parents): if pi 
== -1: assert i == 0", ":] = position for i, pi in enumerate(self.topology): if pi", "parents self.offset = offset self.constrains = constrains self.optimizer = torch.optim.Adam([self.position,", ":], transform[..., i, :, :]) if world: result[..., i, :]", "pi == -1: assert i == 0 continue transform[..., i,", "+ wy m[..., 1, 0] = xy + wz m[...,", "''' def forward(self, rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz',", "* z2 m = torch.empty(quater.shape[:-1] + (3, 3), device=quater.device) m[...,", "= nn.MSELoss() def step(self): self.optimizer.zero_grad() glb = self.forward(self.rotations, self.position, self.offset,", "Exception('Unexpected shape of rotation') rotation = rotation.permute(0, 3, 1, 2)", "world=None, quater=None): if world is None: world = self.world if", "1] = sin if axis == 'y': transform[..., 0, 0]", "= qz + qz xx = qx * x2 yy", "= xy + wz m[..., 1, 1] = 1.0 -", "2] = yz - wx m[..., 2, 0] = xz", "@staticmethod def transform_from_euler(rotation, order): rotation = rotation / 180 *", "offset should have shape batch_size * Joint_num * 3 output", "def forward_from_raw(self, raw, offset, world=None, quater=None): if world is None:", "transform_from_euler(rotation, order): rotation = rotation / 180 * math.pi transform", "torch.norm(rotation, dim=-1, keepdim=True) #norm[norm < 1e-10] = 1 rotation =", "2] = sin transform[..., 2, 0] = -sin if axis", "= rotation.permute(0, 3, 1, 2) position = position.permute(0, 2, 1)", ":], offset[..., i, :, :]).squeeze() if world: result[..., i, :]", "args.rotation == 'quaternion' def forward_from_raw(self, raw, offset, world=None, quater=None): if", "- ord('x') transform[..., cord, :] = transform[..., :, cord] =", "should have shape batch_size * Joint_num * (3/4) * Time", "step(self): self.optimizer.zero_grad() glb = self.forward(self.rotations, self.position, self.offset, order='', quater=True, world=True)", "* (3/4) * Time position should have shape batch_size *", ":]) result[..., i, :] = torch.matmul(transform[..., i, :, :], offset[...,", "from_local_to_world(self, res: torch.Tensor): res = res.clone() for i, pi in", "import torch import torch.nn as nn import numpy as np", "z2 zz = qz * z2 wz = qw *", "self.crit(glb, self.constrains) loss.backward() self.optimizer.step() self.glb = glb return loss.item() def", "Joint_num * (3/4) * Time position should have shape batch_size", "enumerate(self.topology): if pi == -1: assert i == 0 continue", "rotation.permute(0, 3, 1, 2) position = position.permute(0, 2, 1) '''", "pi == 0 or pi == -1: continue res[..., i,", "if axis == 'y': transform[..., 0, 0] = transform[..., 2,", "sin = torch.sin(euler) cord = ord(axis) - ord('x') transform[..., cord,", "positions: torch.Tensor, offset, parents, constrains): self.rotations = rotations self.rotations.requires_grad_(True) self.position", "InverseKinematics: def __init__(self, rotations: torch.Tensor, positions: torch.Tensor, offset, parents, constrains):", "batch_size * Joint_num * 3 output have shape batch_size *", "+ (3, 3), device=euler.device) cos = torch.cos(euler) sin = torch.sin(euler)", "z2 = qz + qz xx = qx * x2", "positions self.position.requires_grad_(True) self.parents = parents self.offset = offset self.constrains =", ":] return self.forward(rotation_final, position, offset, world=world, quater=quater) ''' rotation should", "return loss.item() def tloss(self, time): return self.crit(self.glb[time, :], self.constrains[time, :])", "dim=-1, keepdim=True) rotation = rotation / norm if quater: transform", "+ wx m[..., 
2, 2] = 1.0 - (xx +", "offset.shape[-2], offset.shape[-1], 1)) result[..., 0, :] = position for i,", ":, :] = rotation[:, i, :, :] return self.forward(rotation_final, position,", "quater and rotation.shape[-2] != 4: raise Exception('Unexpected shape of rotation')", "j in enumerate(self.rotation_map): rotation_final[:, j, :, :] = rotation[:, i,", "axis == 'z': transform[..., 0, 0] = transform[..., 1, 1]", "0] = transform[..., 1, 1] = cos transform[..., 0, 1]", "pi, :] return result @staticmethod def transform_from_euler(rotation, order): rotation =", "return np.array(res) ''' rotation should have shape batch_size * Joint_num", "== 0 or pi == -1: continue res[..., i, :]", "transform @staticmethod def transform_from_quaternion(quater: torch.Tensor): qw = quater[..., 0] qx", "should have shape batch_size * Joint_num * 3 output have", "for t in range(self.constrains.shape[0])] return np.array(res) ''' rotation should have", "1)) result[..., 0, :] = position for i, pi in", "order='', quater=True, world=True) loss = self.crit(glb, self.constrains) loss.backward() self.optimizer.step() self.glb", "offset: torch.Tensor, order='xyz', quater=False, world=True): ''' if not quater and", "1)) new_shape = list(rotation.shape) new_shape[1] += 1 new_shape[2] = 1", "transform[..., 1, 2] = -sin transform[..., 2, 1] = sin", "i, :] += result[..., pi, :] return result def from_local_to_world(self,", "= qy * z2 wy = qw * y2 xz", "torch.Tensor, offset: torch.Tensor, order='xyz', quater=False, world=True): ''' if not quater", ":], offset[..., i, :, :]).squeeze() transform[..., i, :, :] =", "identity.reshape((1, 1, -1, 1)) new_shape = list(rotation.shape) new_shape[1] += 1", "rotation = rotation / norm if quater: transform = self.transform_from_quaternion(rotation)", "ord('x') transform[..., cord, :] = transform[..., :, cord] = 0", "self.pos_repr == '3d': position = raw[:, -3:, :] rotation =", "== 'quaternion' def forward_from_raw(self, raw, offset, world=None, quater=None): if world", "= self.transform_from_euler(rotation, order) offset = offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1))", "0] = xy + wz m[..., 1, 1] = 1.0", "offset self.constrains = constrains self.optimizer = torch.optim.Adam([self.position, self.rotations], lr=1e-3, betas=(0.9,", "= torch.tensor((1, 0, 0, 0), dtype=torch.float, device=raw.device) else: rotation =", "+= result[..., pi, :] return result @staticmethod def transform_from_euler(rotation, order):", "* Joint_num * 3 ''' def forward(self, rotation: torch.Tensor, position:", "axis == 'x': transform[..., 1, 1] = transform[..., 2, 2]", "= position.permute(0, 2, 1) result = torch.empty(rotation.shape[:-1] + (3, ),", "rotation = rotation.permute(0, 3, 1, 2) position = position.permute(0, 2,", "for i, j in enumerate(self.rotation_map): rotation_final[:, j, :, :] =", "quater[..., 1] qy = quater[..., 2] qz = quater[..., 3]", "quater=quater) ''' rotation should have shape batch_size * Joint_num *", "rotation = raw[:, :-3, :] elif self.pos_repr == '4d': raise", "(3, 3), device=euler.device) cos = torch.cos(euler) sin = torch.sin(euler) cord", "new_shape = list(rotation.shape) new_shape[1] += 1 new_shape[2] = 1 rotation_final", "+= res[..., pi, :] return res @staticmethod def transform_from_euler(rotation, order):", "torch.Tensor, offset: torch.Tensor, order='xyz', quater=False, world=True): if not quater and", "'quaternion' def forward_from_raw(self, raw, offset, world=None, quater=None): if world is", "rotation should have shape batch_size * Joint_num * (3/4) *", "= 1 rotation_final 
= identity.repeat(new_shape) for i, j in enumerate(self.rotation_map):", "torch.matmul(transform[..., pi, :, :], transform[..., i, :, :]) result[..., i,", "in enumerate(self.rotation_map): rotation_final[:, j, :, :] = rotation[:, i, :,", "< 1e-10] = 1 rotation = rotation / norm if", "0 or pi == -1: continue res[..., i, :] +=", "transform[..., 2, 0] = -sin if axis == 'z': transform[...,", "0, 0] = transform[..., 1, 1] = cos transform[..., 0,", "+ zz) m[..., 0, 1] = xy - wz m[...,", "-1, 3, rotation.shape[-1])) identity = torch.zeros((3, ), dtype=torch.float, device=raw.device) identity", "offset, world=None, quater=None): if world is None: world = self.world", "= glb return loss.item() def tloss(self, time): return self.crit(self.glb[time, :],", "= torch.norm(rotation, dim=-1, keepdim=True) rotation = rotation / norm if", "torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform) return transform @staticmethod def transform_from_axis(euler, axis):", "m[..., 0, 0] = 1.0 - (yy + zz) m[...,", "''' result = torch.empty(rotation.shape[:-1] + (3,), device=position.device) norm = torch.norm(rotation,", "2] = -sin transform[..., 2, 1] = sin if axis", "self.rotations], lr=1e-3, betas=(0.9, 0.999)) self.crit = nn.MSELoss() def step(self): self.optimizer.zero_grad()", "of rotation') rotation = rotation.permute(0, 3, 1, 2) position =", "transform[..., i, :, :]) if world: result[..., i, :] +=", "= raw[:, -3:, :] rotation = raw[:, :-3, :] elif", "== -1: continue res[..., i, :] += res[..., pi, :]", "cos = torch.cos(euler) sin = torch.sin(euler) cord = ord(axis) -", "self.forward(self.rotations, self.position, self.offset, order='', quater=True, world=True) loss = self.crit(glb, self.constrains)", "return self.crit(self.glb[time, :], self.constrains[time, :]) def all_loss(self): res = [self.tloss(t).detach().numpy()", "xx = qx * x2 yy = qy * y2", "self.world if quater is None: quater = self.quater if self.pos_repr", "'4d': raise Exception('Not support') if quater: rotation = rotation.reshape((rotation.shape[0], -1,", "identity.repeat(new_shape) for i, j in enumerate(self.rotation_map): rotation_final[:, j, :, :]", "+ (3, ), device=position.device) norm = torch.norm(rotation, dim=-1, keepdim=True) #norm[norm", "1] = yz + wx m[..., 2, 2] = 1.0", "= quater[..., 2] qz = quater[..., 3] x2 = qx", "quater: rotation = rotation.reshape((rotation.shape[0], -1, 4, rotation.shape[-1])) identity = torch.tensor((1,", "result[..., i, :] = torch.matmul(transform[..., pi, :, :], offset[..., i,", "return self.forward(rotation_final, position, offset, world=world, quater=quater) ''' rotation should have", "import torch.nn as nn import numpy as np import math", "= qy * y2 wx = qw * x2 xy", "res = res.clone() for i, pi in enumerate(self.topology): if pi", "result[..., i, :] += result[..., pi, :] return result def", "self.pos_repr = args.pos_repr self.quater = args.rotation == 'quaternion' def forward_from_raw(self,", "torch.matmul(transform[..., i, :, :], offset[..., i, :, :]).squeeze() if world:", "quater[..., 3] x2 = qx + qx y2 = qy", "have shape batch_size * Joint_num * 3 output have shape", "edges): self.topology = [-1] * (len(edges) + 1) self.rotation_map =", "1, -1, 1)) new_shape = list(rotation.shape) new_shape[1] += 1 new_shape[2]", "= rotation[:, i, :, :] return self.forward(rotation_final, position, offset, world=world,", "batch_size * Time * Joint_num * 3 ''' def forward(self,", "forward(self, rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', 
quater=False, world=True):", ":] = transform[..., :, cord] = 0 transform[..., cord, cord]", "1.0 - (xx + zz) m[..., 1, 2] = yz", "wy m[..., 1, 0] = xy + wz m[..., 1,", "result @staticmethod def transform_from_euler(rotation, order): rotation = rotation / 180", "1.0 - (yy + zz) m[..., 0, 1] = xy", "xy + wz m[..., 1, 1] = 1.0 - (xx", "= torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform) return transform @staticmethod def transform_from_axis(euler,", "1 rotation = rotation / norm if quater: transform =", ":], transform[..., i, :, :]) result[..., i, :] = torch.matmul(transform[...,", "= torch.empty(euler.shape[0:3] + (3, 3), device=euler.device) cos = torch.cos(euler) sin", "2] = cos transform[..., 0, 2] = sin transform[..., 2,", "= qw * x2 xy = qx * y2 yz", "= cos transform[..., 0, 1] = -sin transform[..., 1, 0]", "1) self.rotation_map = [] for i, edge in enumerate(edges): self.topology[edge[1]]", "in enumerate(self.topology): if pi == 0 or pi == -1:", "pi, :, :], transform[..., i, :, :]) if world: result[...,", "loss = self.crit(glb, self.constrains) loss.backward() self.optimizer.step() self.glb = glb return", "shape batch_size * Joint_num * (3/4) * Time position should", "numpy as np import math class ForwardKinematics: def __init__(self, args,", "* y2 wx = qw * x2 xy = qx", "result = torch.empty(rotation.shape[:-1] + (3, ), device=position.device) norm = torch.norm(rotation,", "torch.zeros((3, ), dtype=torch.float, device=raw.device) identity = identity.reshape((1, 1, -1, 1))", "in range(self.constrains.shape[0])] return np.array(res) ''' rotation should have shape batch_size", "torch.matmul(transform[..., pi, :, :], offset[..., i, :, :]).squeeze() transform[..., i,", "3] x2 = qx + qx y2 = qy +", "new_shape[2] = 1 rotation_final = identity.repeat(new_shape) for i, j in", "2] = cos transform[..., 1, 2] = -sin transform[..., 2,", "torch.Tensor, order='xyz', quater=False, world=True): if not quater and rotation.shape[-2] !=", "or pi == -1: continue res[..., i, :] += res[...,", "res[..., i, :] += res[..., pi, :] return res @staticmethod", "* Time position should have shape batch_size * 3 *", "+ (3,), device=position.device) norm = torch.norm(rotation, dim=-1, keepdim=True) rotation =", "Time position should have shape batch_size * 3 * Time", "rotation') if quater and rotation.shape[-2] != 4: raise Exception('Unexpected shape", "wy m[..., 2, 1] = yz + wx m[..., 2,", "transform[..., i, :, :] = torch.matmul(transform[..., pi, :, :], transform[...,", "res: torch.Tensor): res = res.clone() for i, pi in enumerate(self.topology):", "qz * z2 wz = qw * z2 m =", "axis == 'y': transform[..., 0, 0] = transform[..., 2, 2]", "= res.clone() for i, pi in enumerate(self.topology): if pi ==", "cord] = 1 if axis == 'x': transform[..., 1, 1]", "= -sin transform[..., 1, 0] = sin return transform @staticmethod", "= xz - wy m[..., 2, 1] = yz +", "glb = self.forward(self.rotations, self.position, self.offset, order='', quater=True, world=True) loss =", "= yz + wx m[..., 2, 2] = 1.0 -", "y2 yz = qy * z2 wy = qw *", "= list(rotation.shape) new_shape[1] += 1 new_shape[2] = 1 rotation_final =", "m[..., 1, 1] = 1.0 - (xx + zz) m[...,", "= parents self.offset = offset self.constrains = constrains self.optimizer =", "betas=(0.9, 0.999)) self.crit = nn.MSELoss() def step(self): self.optimizer.zero_grad() glb =", "res @staticmethod def transform_from_euler(rotation, order): rotation = rotation / 180", "* z2 wz = qw * z2 m = torch.empty(quater.shape[:-1]", ":, :]).squeeze() if world: 
result[..., i, :] += result[..., pi,", "shape batch_size * 3 * Time offset should have shape", "pi, :] return result def from_local_to_world(self, res: torch.Tensor): res =", "= constrains self.optimizer = torch.optim.Adam([self.position, self.rotations], lr=1e-3, betas=(0.9, 0.999)) self.crit", "cord] = 0 transform[..., cord, cord] = 1 if axis", "continue res[..., i, :] += res[..., pi, :] return res", "1 rotation_final = identity.repeat(new_shape) for i, j in enumerate(self.rotation_map): rotation_final[:,", "i, :, :] = torch.matmul(transform[..., pi, :, :], transform[..., i,", "0] = xz - wy m[..., 2, 1] = yz", "* Joint_num * (3/4) * Time position should have shape", "3 output have shape batch_size * Time * Joint_num *", "transform[..., 1, 0] = sin return transform @staticmethod def transform_from_quaternion(quater:", ":] = torch.matmul(transform[..., pi, :, :], offset[..., i, :, :]).squeeze()", "i, :, :]).squeeze() transform[..., i, :, :] = torch.matmul(transform[..., pi,", "sin return transform @staticmethod def transform_from_quaternion(quater: torch.Tensor): qw = quater[...,", "1, 1] = transform[..., 2, 2] = cos transform[..., 1,", "args.pos_repr self.quater = args.rotation == 'quaternion' def forward_from_raw(self, raw, offset,", "else: rotation = rotation.reshape((rotation.shape[0], -1, 3, rotation.shape[-1])) identity = torch.zeros((3,", "= rotation.reshape((rotation.shape[0], -1, 3, rotation.shape[-1])) identity = torch.zeros((3, ), dtype=torch.float,", "world: result[..., i, :] += result[..., pi, :] return result", "1], order[1]), ForwardKinematics.transform_from_axis(rotation[..., 2], order[2])) transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]),", "world=True): if not quater and rotation.shape[-2] != 3: raise Exception('Unexpected", "0 transform[..., cord, cord] = 1 if axis == 'x':", "m[..., 2, 1] = yz + wx m[..., 2, 2]", "2) position = position.permute(0, 2, 1) result = torch.empty(rotation.shape[:-1] +", "self.pos_repr == '4d': raise Exception('Not support') if quater: rotation =", "2, 2] = cos transform[..., 1, 2] = -sin transform[...,", "import math class ForwardKinematics: def __init__(self, args, edges): self.topology =", "- wy m[..., 2, 1] = yz + wx m[...,", "1) ''' result = torch.empty(rotation.shape[:-1] + (3,), device=position.device) norm =", "+ wz m[..., 1, 1] = 1.0 - (xx +", "== 0 continue result[..., i, :] = torch.matmul(transform[..., pi, :,", "dim=-1, keepdim=True) #norm[norm < 1e-10] = 1 rotation = rotation", "= rotation / norm if quater: transform = self.transform_from_quaternion(rotation) else:", "device=position.device) norm = torch.norm(rotation, dim=-1, keepdim=True) rotation = rotation /", "shape batch_size * Time * Joint_num * 3 ''' def", "qx * z2 zz = qz * z2 wz =", "@staticmethod def transform_from_quaternion(quater: torch.Tensor): qw = quater[..., 0] qx =", "zz) m[..., 1, 2] = yz - wx m[..., 2,", "0] = 1.0 - (yy + zz) m[..., 0, 1]", "world=True): ''' if not quater and rotation.shape[-2] != 3: raise", "yz = qy * z2 wy = qw * y2", "return transform @staticmethod def transform_from_axis(euler, axis): transform = torch.empty(euler.shape[0:3] +", "= 1 if axis == 'x': transform[..., 1, 1] =", "have shape batch_size * 3 * Time offset should have", "torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]), ForwardKinematics.transform_from_axis(rotation[..., 2], order[2])) transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0],", "def transform_from_axis(euler, axis): 
transform = torch.empty(euler.shape[0:3] + (3, 3), device=euler.device)", "1.0 - (xx + yy) return m class InverseKinematics: def", "pi, :, :], transform[..., i, :, :]) result[..., i, :]", "if axis == 'z': transform[..., 0, 0] = transform[..., 1,", "def all_loss(self): res = [self.tloss(t).detach().numpy() for t in range(self.constrains.shape[0])] return", "i, pi in enumerate(self.topology): if pi == 0 or pi", "def transform_from_quaternion(quater: torch.Tensor): qw = quater[..., 0] qx = quater[...,", "self.constrains = constrains self.optimizer = torch.optim.Adam([self.position, self.rotations], lr=1e-3, betas=(0.9, 0.999))", "if quater: transform = self.transform_from_quaternion(rotation) else: transform = self.transform_from_euler(rotation, order)", "[-1] * (len(edges) + 1) self.rotation_map = [] for i,", "+ qx y2 = qy + qy z2 = qz", "m[..., 2, 0] = xz - wy m[..., 2, 1]", "quater[..., 0] qx = quater[..., 1] qy = quater[..., 2]", "yz - wx m[..., 2, 0] = xz - wy", "!= 4: raise Exception('Unexpected shape of rotation') rotation = rotation.permute(0,", "Joint_num * 3 output have shape batch_size * Time *", "== 'x': transform[..., 1, 1] = transform[..., 2, 2] =", "'z': transform[..., 0, 0] = transform[..., 1, 1] = cos", "= yz - wx m[..., 2, 0] = xz -", "wx m[..., 2, 2] = 1.0 - (xx + yy)", "-sin transform[..., 1, 0] = sin return transform @staticmethod def", "zz) m[..., 0, 1] = xy - wz m[..., 0,", "self.position, self.offset, order='', quater=True, world=True) loss = self.crit(glb, self.constrains) loss.backward()", "norm = torch.norm(rotation, dim=-1, keepdim=True) rotation = rotation / norm", ":] = position for i, pi in enumerate(self.parents): if pi", "if world: result[..., i, :] += result[..., pi, :] return", "= transform[..., :, cord] = 0 transform[..., cord, cord] =", ":, :], offset[..., i, :, :]).squeeze() if world: result[..., i,", "xy - wz m[..., 0, 2] = xz + wy", "rotation') rotation = rotation.permute(0, 3, 1, 2) position = position.permute(0,", "0, 2] = xz + wy m[..., 1, 0] =", "2] = 1.0 - (xx + yy) return m class", "rotation = rotation / 180 * math.pi transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[...,", "y2 = qy + qy z2 = qz + qz", "qz = quater[..., 3] x2 = qx + qx y2", "quater=True, world=True) loss = self.crit(glb, self.constrains) loss.backward() self.optimizer.step() self.glb =", "'x': transform[..., 1, 1] = transform[..., 2, 2] = cos", "m[..., 0, 2] = xz + wy m[..., 1, 0]", "3: raise Exception('Unexpected shape of rotation') if quater and rotation.shape[-2]", "enumerate(self.rotation_map): rotation_final[:, j, :, :] = rotation[:, i, :, :]", "), dtype=torch.float, device=raw.device) identity = identity.reshape((1, 1, -1, 1)) new_shape", "= qw * y2 xz = qx * z2 zz", "2) position = position.permute(0, 2, 1) ''' result = torch.empty(rotation.shape[:-1]", "as nn import numpy as np import math class ForwardKinematics:", "1, 2) position = position.permute(0, 2, 1) result = torch.empty(rotation.shape[:-1]", "#norm[norm < 1e-10] = 1 rotation = rotation / norm", "offset.shape[-1], 1)) result[..., 0, :] = position for i, pi", "self.glb = glb return loss.item() def tloss(self, time): return self.crit(self.glb[time,", "= torch.matmul(transform[..., pi, :, :], transform[..., i, :, :]) if", ":, cord] = 0 transform[..., cord, cord] = 1 if", "rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False, world=True): if", "== 0 continue transform[..., i, :, :] = torch.matmul(transform[..., pi,", "quater: transform = 
self.transform_from_quaternion(rotation) else: transform = self.transform_from_euler(rotation, order) offset", "2, 0] = xz - wy m[..., 2, 1] =", "position.permute(0, 2, 1) ''' result = torch.empty(rotation.shape[:-1] + (3,), device=position.device)", "/ 180 * math.pi transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]), ForwardKinematics.transform_from_axis(rotation[...,", ":]).squeeze() transform[..., i, :, :] = torch.matmul(transform[..., pi, :, :],", "self.topology[edge[1]] = edge[0] self.rotation_map.append(edge[1]) self.world = args.fk_world self.pos_repr = args.pos_repr", "1 new_shape[2] = 1 rotation_final = identity.repeat(new_shape) for i, j", "* Joint_num * 3 output have shape batch_size * Time", "np import math class ForwardKinematics: def __init__(self, args, edges): self.topology", "math.pi transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]), ForwardKinematics.transform_from_axis(rotation[..., 2], order[2])) transform", "= torch.cos(euler) sin = torch.sin(euler) cord = ord(axis) - ord('x')", "+= 1 new_shape[2] = 1 rotation_final = identity.repeat(new_shape) for i,", "= -sin if axis == 'z': transform[..., 0, 0] =", "new_shape[1] += 1 new_shape[2] = 1 rotation_final = identity.repeat(new_shape) for", "np.array(res) ''' rotation should have shape batch_size * Joint_num *", "rotation[:, i, :, :] return self.forward(rotation_final, position, offset, world=world, quater=quater)", "wx = qw * x2 xy = qx * y2", "3, rotation.shape[-1])) identity = torch.zeros((3, ), dtype=torch.float, device=raw.device) identity =", "= torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]), ForwardKinematics.transform_from_axis(rotation[..., 2], order[2])) transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[...,", "* 3 * Time offset should have shape batch_size *", "1, 2] = yz - wx m[..., 2, 0] =", "offset = offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1)) result[..., 0, :]", "qw = quater[..., 0] qx = quater[..., 1] qy =", "qy + qy z2 = qz + qz xx =", "z2 wz = qw * z2 m = torch.empty(quater.shape[:-1] +", "transform[..., i, :, :]) result[..., i, :] = torch.matmul(transform[..., i,", ":, :], offset[..., i, :, :]).squeeze() transform[..., i, :, :]", "cord, :] = transform[..., :, cord] = 0 transform[..., cord,", ":-3, :] elif self.pos_repr == '4d': raise Exception('Not support') if", "i, pi in enumerate(self.topology): if pi == -1: assert i", "elif self.pos_repr == '4d': raise Exception('Not support') if quater: rotation", "transform[..., :, cord] = 0 transform[..., cord, cord] = 1", "torch.Tensor, order='xyz', quater=False, world=True): ''' if not quater and rotation.shape[-2]", "quater=False, world=True): if not quater and rotation.shape[-2] != 3: raise", "3 ''' def forward(self, rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor,", "= qx + qx y2 = qy + qy z2", "is None: world = self.world if quater is None: quater", "rotation.reshape((rotation.shape[0], -1, 3, rotation.shape[-1])) identity = torch.zeros((3, ), dtype=torch.float, device=raw.device)", "order='xyz', quater=False, world=True): ''' if not quater and rotation.shape[-2] !=", "= xz + wy m[..., 1, 0] = xy +", "(3, ), device=position.device) norm = torch.norm(rotation, dim=-1, keepdim=True) #norm[norm <", "2] qz = quater[..., 3] x2 = qx + qx", "order[0]), transform) return transform @staticmethod def transform_from_axis(euler, axis): transform =", "cord = ord(axis) - ord('x') transform[..., cord, 
:] = transform[...,", ":]) if world: result[..., i, :] += result[..., pi, :]", "wz = qw * z2 m = torch.empty(quater.shape[:-1] + (3,", "= [-1] * (len(edges) + 1) self.rotation_map = [] for", "transform[..., cord, cord] = 1 if axis == 'x': transform[...,", "enumerate(edges): self.topology[edge[1]] = edge[0] self.rotation_map.append(edge[1]) self.world = args.fk_world self.pos_repr =", "t in range(self.constrains.shape[0])] return np.array(res) ''' rotation should have shape", "= [] for i, edge in enumerate(edges): self.topology[edge[1]] = edge[0]", "0 continue result[..., i, :] = torch.matmul(transform[..., pi, :, :],", "== 'z': transform[..., 0, 0] = transform[..., 1, 1] =", "quater=None): if world is None: world = self.world if quater", "torch.optim.Adam([self.position, self.rotations], lr=1e-3, betas=(0.9, 0.999)) self.crit = nn.MSELoss() def step(self):", "1, 1] = 1.0 - (xx + zz) m[..., 1,", "ForwardKinematics.transform_from_axis(rotation[..., 2], order[2])) transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform) return", "batch_size * 3 * Time offset should have shape batch_size", "- (xx + yy) return m class InverseKinematics: def __init__(self,", "= torch.optim.Adam([self.position, self.rotations], lr=1e-3, betas=(0.9, 0.999)) self.crit = nn.MSELoss() def", "quater=False, world=True): ''' if not quater and rotation.shape[-2] != 3:", "= [self.tloss(t).detach().numpy() for t in range(self.constrains.shape[0])] return np.array(res) ''' rotation", "2, 1) ''' result = torch.empty(rotation.shape[:-1] + (3,), device=position.device) norm", "== -1: assert i == 0 continue result[..., i, :]", "= self.crit(glb, self.constrains) loss.backward() self.optimizer.step() self.glb = glb return loss.item()", "* math.pi transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]), ForwardKinematics.transform_from_axis(rotation[..., 2], order[2]))", "result = torch.empty(rotation.shape[:-1] + (3,), device=position.device) norm = torch.norm(rotation, dim=-1,", "assert i == 0 continue result[..., i, :] = torch.matmul(transform[...,", "batch_size * Joint_num * (3/4) * Time position should have", "position should have shape batch_size * 3 * Time offset", "torch.nn as nn import numpy as np import math class", "qz + qz xx = qx * x2 yy =", "qy * y2 wx = qw * x2 xy =", "device=raw.device) else: rotation = rotation.reshape((rotation.shape[0], -1, 3, rotation.shape[-1])) identity =", "if world is None: world = self.world if quater is", "res.clone() for i, pi in enumerate(self.topology): if pi == 0", "-1, 1)) new_shape = list(rotation.shape) new_shape[1] += 1 new_shape[2] =", "quater[..., 2] qz = quater[..., 3] x2 = qx +", "2, 2] = 1.0 - (xx + yy) return m", "0, 1] = xy - wz m[..., 0, 2] =", "sin transform[..., 2, 0] = -sin if axis == 'z':", "m[..., 2, 2] = 1.0 - (xx + yy) return", "2, 1] = sin if axis == 'y': transform[..., 0,", "= torch.empty(rotation.shape[:-1] + (3,), device=position.device) norm = torch.norm(rotation, dim=-1, keepdim=True)", "self.constrains[time, :]) def all_loss(self): res = [self.tloss(t).detach().numpy() for t in", "torch.norm(rotation, dim=-1, keepdim=True) rotation = rotation / norm if quater:", "order[1]), ForwardKinematics.transform_from_axis(rotation[..., 2], order[2])) transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform)", "pi, :, :], offset[..., i, :, :]).squeeze() transform[..., i, :,", "world=True) loss = self.crit(glb, self.constrains) loss.backward() 
self.optimizer.step() self.glb = glb", "position, offset, world=world, quater=quater) ''' rotation should have shape batch_size", "torch.Tensor): qw = quater[..., 0] qx = quater[..., 1] qy", "= args.rotation == 'quaternion' def forward_from_raw(self, raw, offset, world=None, quater=None):", "self.quater = args.rotation == 'quaternion' def forward_from_raw(self, raw, offset, world=None,", "self.position.requires_grad_(True) self.parents = parents self.offset = offset self.constrains = constrains", "* 3 ''' def forward(self, rotation: torch.Tensor, position: torch.Tensor, offset:", "order) offset = offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1)) result[..., 0,", "position for i, pi in enumerate(self.topology): if pi == -1:", "m class InverseKinematics: def __init__(self, rotations: torch.Tensor, positions: torch.Tensor, offset,", "0, 0] = 1.0 - (yy + zz) m[..., 0,", "= self.quater if self.pos_repr == '3d': position = raw[:, -3:,", "= transform[..., 1, 1] = cos transform[..., 0, 1] =", "self.topology = [-1] * (len(edges) + 1) self.rotation_map = []", "+ qy z2 = qz + qz xx = qx", "offset, parents, constrains): self.rotations = rotations self.rotations.requires_grad_(True) self.position = positions", "1] = -sin transform[..., 1, 0] = sin return transform", "2, 2] = cos transform[..., 0, 2] = sin transform[...,", "2, 1) result = torch.empty(rotation.shape[:-1] + (3, ), device=position.device) norm", "result[..., i, :] += result[..., pi, :] return result @staticmethod", "z2 wy = qw * y2 xz = qx *", "rotation_final[:, j, :, :] = rotation[:, i, :, :] return", "0, 1] = -sin transform[..., 1, 0] = sin return", "i, :, :] return self.forward(rotation_final, position, offset, world=world, quater=quater) '''", "qw * z2 m = torch.empty(quater.shape[:-1] + (3, 3), device=quater.device)", "glb return loss.item() def tloss(self, time): return self.crit(self.glb[time, :], self.constrains[time,", "transform[..., 1, 1] = transform[..., 2, 2] = cos transform[...,", "-sin transform[..., 2, 1] = sin if axis == 'y':", "torch.empty(euler.shape[0:3] + (3, 3), device=euler.device) cos = torch.cos(euler) sin =", "transform[..., 2, 2] = cos transform[..., 1, 2] = -sin", "order='xyz', quater=False, world=True): if not quater and rotation.shape[-2] != 3:", "self.offset = offset self.constrains = constrains self.optimizer = torch.optim.Adam([self.position, self.rotations],", "math class ForwardKinematics: def __init__(self, args, edges): self.topology = [-1]", "torch.empty(quater.shape[:-1] + (3, 3), device=quater.device) m[..., 0, 0] = 1.0", "z2 m = torch.empty(quater.shape[:-1] + (3, 3), device=quater.device) m[..., 0,", "= 1 rotation = rotation / norm if quater: transform", "order[2])) transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform) return transform @staticmethod", "-1: continue res[..., i, :] += res[..., pi, :] return", "torch.cos(euler) sin = torch.sin(euler) cord = ord(axis) - ord('x') transform[...,", "def tloss(self, time): return self.crit(self.glb[time, :], self.constrains[time, :]) def all_loss(self):", "rotation.shape[-1])) identity = torch.tensor((1, 0, 0, 0), dtype=torch.float, device=raw.device) else:", "result[..., 0, :] = position for i, pi in enumerate(self.topology):", ":, :]) result[..., i, :] = torch.matmul(transform[..., i, :, :],", ":] += result[..., pi, :] return result def from_local_to_world(self, res:", "= torch.norm(rotation, dim=-1, keepdim=True) #norm[norm < 1e-10] = 1 rotation", "i, :] += result[..., pi, :] return result 
@staticmethod def", "return result def from_local_to_world(self, res: torch.Tensor): res = res.clone() for", "-3:, :] rotation = raw[:, :-3, :] elif self.pos_repr ==", "offset, world=world, quater=quater) ''' rotation should have shape batch_size *", "1] = 1.0 - (xx + zz) m[..., 1, 2]", "qx * x2 yy = qy * y2 wx =", "raise Exception('Unexpected shape of rotation') rotation = rotation.permute(0, 3, 1,", "1e-10] = 1 rotation = rotation / norm if quater:", "result[..., i, :] = torch.matmul(transform[..., i, :, :], offset[..., i,", "= torch.empty(rotation.shape[:-1] + (3, ), device=position.device) norm = torch.norm(rotation, dim=-1,", "result[..., pi, :] return result def from_local_to_world(self, res: torch.Tensor): res", "transform[..., 0, 2] = sin transform[..., 2, 0] = -sin", "0.999)) self.crit = nn.MSELoss() def step(self): self.optimizer.zero_grad() glb = self.forward(self.rotations,", "* y2 yz = qy * z2 wy = qw", "rotation.reshape((rotation.shape[0], -1, 4, rotation.shape[-1])) identity = torch.tensor((1, 0, 0, 0),", "class InverseKinematics: def __init__(self, rotations: torch.Tensor, positions: torch.Tensor, offset, parents,", "transform[..., 0, 0] = transform[..., 1, 1] = cos transform[...,", "range(self.constrains.shape[0])] return np.array(res) ''' rotation should have shape batch_size *", "keepdim=True) #norm[norm < 1e-10] = 1 rotation = rotation /", "= torch.matmul(transform[..., pi, :, :], transform[..., i, :, :]) result[...,", "4: raise Exception('Unexpected shape of rotation') rotation = rotation.permute(0, 3,", "+ 1) self.rotation_map = [] for i, edge in enumerate(edges):", "assert i == 0 continue transform[..., i, :, :] =", "rotations self.rotations.requires_grad_(True) self.position = positions self.position.requires_grad_(True) self.parents = parents self.offset", "transform_from_quaternion(quater: torch.Tensor): qw = quater[..., 0] qx = quater[..., 1]", "= raw[:, :-3, :] elif self.pos_repr == '4d': raise Exception('Not", "transform = self.transform_from_euler(rotation, order) offset = offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1],", "torch.empty(rotation.shape[:-1] + (3, ), device=position.device) norm = torch.norm(rotation, dim=-1, keepdim=True)", "device=euler.device) cos = torch.cos(euler) sin = torch.sin(euler) cord = ord(axis)", "cord, cord] = 1 if axis == 'x': transform[..., 1,", "* z2 wy = qw * y2 xz = qx", "= positions self.position.requires_grad_(True) self.parents = parents self.offset = offset self.constrains", "self.rotation_map = [] for i, edge in enumerate(edges): self.topology[edge[1]] =", "wz m[..., 1, 1] = 1.0 - (xx + zz)", "rotation / norm if quater: transform = self.transform_from_quaternion(rotation) else: transform", "= cos transform[..., 0, 2] = sin transform[..., 2, 0]", "0), dtype=torch.float, device=raw.device) else: rotation = rotation.reshape((rotation.shape[0], -1, 3, rotation.shape[-1]))", "shape batch_size * Joint_num * 3 output have shape batch_size", "(yy + zz) m[..., 0, 1] = xy - wz", "and rotation.shape[-2] != 4: raise Exception('Unexpected shape of rotation') rotation", "0, 0, 0), dtype=torch.float, device=raw.device) else: rotation = rotation.reshape((rotation.shape[0], -1,", "Exception('Unexpected shape of rotation') if quater and rotation.shape[-2] != 4:", "rotation_final = identity.repeat(new_shape) for i, j in enumerate(self.rotation_map): rotation_final[:, j,", "continue transform[..., i, :, :] = torch.matmul(transform[..., pi, :, :],", "= position for i, pi in enumerate(self.parents): if pi ==", "forward_from_raw(self, 
raw, offset, world=None, quater=None): if world is None: world", "= -sin transform[..., 2, 1] = sin if axis ==", "== 'y': transform[..., 0, 0] = transform[..., 2, 2] =", "if pi == -1: assert i == 0 continue result[...,", "raw, offset, world=None, quater=None): if world is None: world =", "device=raw.device) identity = identity.reshape((1, 1, -1, 1)) new_shape = list(rotation.shape)", ":, :]) if world: result[..., i, :] += result[..., pi,", "world = self.world if quater is None: quater = self.quater", "def __init__(self, rotations: torch.Tensor, positions: torch.Tensor, offset, parents, constrains): self.rotations", "transform[..., cord, :] = transform[..., :, cord] = 0 transform[...,", "qy = quater[..., 2] qz = quater[..., 3] x2 =", "i, :] = torch.matmul(transform[..., pi, :, :], offset[..., i, :,", "qw * y2 xz = qx * z2 zz =", "- (xx + zz) m[..., 1, 2] = yz -", "rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False, world=True): '''", "raise Exception('Not support') if quater: rotation = rotation.reshape((rotation.shape[0], -1, 4,", "not quater and rotation.shape[-2] != 3: raise Exception('Unexpected shape of", "all_loss(self): res = [self.tloss(t).detach().numpy() for t in range(self.constrains.shape[0])] return np.array(res)", "== '4d': raise Exception('Not support') if quater: rotation = rotation.reshape((rotation.shape[0],", "if not quater and rotation.shape[-2] != 3: raise Exception('Unexpected shape", "rotation = rotation.reshape((rotation.shape[0], -1, 4, rotation.shape[-1])) identity = torch.tensor((1, 0,", "(xx + zz) m[..., 1, 2] = yz - wx", "tloss(self, time): return self.crit(self.glb[time, :], self.constrains[time, :]) def all_loss(self): res", "nn.MSELoss() def step(self): self.optimizer.zero_grad() glb = self.forward(self.rotations, self.position, self.offset, order=''," ]
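The fragments listed above appear to be drawn from a PyTorch forward- and inverse-kinematics module: a ForwardKinematics class that turns per-joint Euler angles or quaternions into world-space joint positions by walking the parent topology, and an InverseKinematics class that fits rotations and a root position to positional constraints with Adam. Two minimal, self-contained sketches follow. They only illustrate the techniques the fragments reference and are not the module itself; every name, shape, and hyperparameter not visible in the fragments is an assumption.

First, the quaternion-to-rotation-matrix conversion that the transform_from_quaternion fragments spell out element by element, assuming a (w, x, y, z) component order as suggested by the quater[..., 0] indexing:

import torch

def quat_to_matrix(q: torch.Tensor) -> torch.Tensor:
    """Convert quaternions in (w, x, y, z) order, shape (..., 4), to rotation
    matrices of shape (..., 3, 3), normalizing first as the fragments do."""
    q = q / torch.norm(q, dim=-1, keepdim=True)
    qw, qx, qy, qz = q[..., 0], q[..., 1], q[..., 2], q[..., 3]
    x2, y2, z2 = qx + qx, qy + qy, qz + qz
    xx, yy, zz = qx * x2, qy * y2, qz * z2
    xy, xz, yz = qx * y2, qx * z2, qy * z2
    wx, wy, wz = qw * x2, qw * y2, qw * z2

    m = torch.empty(q.shape[:-1] + (3, 3), device=q.device, dtype=q.dtype)
    m[..., 0, 0] = 1.0 - (yy + zz)
    m[..., 0, 1] = xy - wz
    m[..., 0, 2] = xz + wy
    m[..., 1, 0] = xy + wz
    m[..., 1, 1] = 1.0 - (xx + zz)
    m[..., 1, 2] = yz - wx
    m[..., 2, 0] = xz - wy
    m[..., 2, 1] = yz + wx
    m[..., 2, 2] = 1.0 - (xx + yy)
    return m

# The identity quaternion should map to the identity matrix.
print(quat_to_matrix(torch.tensor([1.0, 0.0, 0.0, 0.0])))

Second, the optimization pattern visible in the InverseKinematics fragments: Adam over the trainable tensors with lr=1e-3 and betas=(0.9, 0.999), an MSE criterion against the constraints, and a step() that zeroes gradients, runs the forward pass, backpropagates, and steps the optimizer. The forward-kinematics pass is replaced here by a trivial stand-in so the sketch stays self-contained, and the tensor shapes are hypothetical.

import torch
import torch.nn as nn

constraints = torch.randn(10, 3)                    # hypothetical per-frame target positions
positions = torch.zeros(10, 3, requires_grad=True)  # parameter being fitted

optimizer = torch.optim.Adam([positions], lr=1e-3, betas=(0.9, 0.999))
crit = nn.MSELoss()

def step() -> float:
    optimizer.zero_grad()
    glb = positions                  # stand-in for the real FK forward pass
    loss = crit(glb, constraints)
    loss.backward()
    optimizer.step()
    return loss.item()

losses = [step() for _ in range(200)]
print(losses[0], losses[-1])         # the loss shrinks as the fit improves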
[ "task_id='hive_partition_check', partition_names=[ \"airflow.static_babynames_partitioned/ds={{ds}}\", \"airflow.static_babynames_partitioned/ds={{ds}}\" ], dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def", "\"SELECT ${num_col} FROM ${hiveconf:table};\" t = HiveOperator( hiveconf_jinja_translate=True, task_id='dry_run_basic_hql', hql=hql,", "OF ANY # KIND, either express or implied. See the", "HiveOperator( hiveconf_jinja_translate=True, task_id='dry_run_basic_hql', hql=hql, dag=self.dag) t.prepare_template() self.assertEqual(t.hql, \"SELECT {{ num_col", "ignore_ti_state=True) def test_presto(self): sql = \"\"\" SELECT count(1) FROM airflow.static_babynames_partitioned;", "more contributor license agreements. See the NOTICE file # distributed", "t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self): t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[", "t = operators.hive_to_mysql.HiveToMySqlTransfer( mysql_conn_id='airflow_db', task_id='hive_to_mysql_check', create=True, sql=\"\"\" SELECT name FROM", "hook = HiveCliHook() returner = mock.MagicMock() returner.extra_dejson = {'proxy_user': 'a_user_proxy'}", "t = operators.hive_to_samba_operator.Hive2SambaOperator( task_id='hive2samba_check', samba_conn_id='tableau_samba', hql=\"SELECT * FROM airflow.static_babynames LIMIT", "FROM airflow.static_babynames LIMIT 10000\", destination_filepath='test_airflow.csv', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def", "LIMIT 10000\", destination_filepath='test_airflow.csv', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_to_mysql(self): t", "import os import unittest from unittest import mock import nose", "Apache Software Foundation (ASF) under one # or more contributor", "t = HiveOperator( task_id='test_default_config_queue', hql=self.hql, mapred_queue_priority='HIGH', mapred_job_name='airflow.test_default_config_queue', dag=self.dag) # just", "def test_presto_to_mysql(self): t = operators.presto_to_mysql.PrestoToMySqlTransfer( task_id='presto_to_mysql_check', sql=\"\"\" SELECT name, count(*)", "WARRANTIES OR CONDITIONS OF ANY # KIND, either express or", "'day': '{{ ds }}'}, task_id='dry_run_basic_hql', hql=hql, dag=self.dag) t.prepare_template() self.assertEqual( t.hql,", "BY name \"\"\", mysql_table='test_static_babynames', mysql_preoperator='TRUNCATE TABLE test_static_babynames;', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,", "= DAG('test_dag_id', default_args=args) self.dag = dag self.hql = \"\"\" USE", "\"\"\" t = operators.presto_check_operator.PrestoCheckOperator( task_id='presto_check', sql=sql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "2.0 (the # \"License\"); you may not use this file", "table='airflow.static_babynames_partitioned', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_metastore_sql_sensor(self): t = operators.sensors.MetastorePartitionSensor(", "= DEFAULT_DATE_ISO[:10] class TestHiveEnvironment(unittest.TestCase): def setUp(self): args = {'owner': 'airflow',", "operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[ \"airflow.static_babynames_partitioned/ds={{ds}}\" ], dag=self.dag) t.run(start_date=DEFAULT_DATE, 
end_date=DEFAULT_DATE, ignore_ti_state=True) def", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "task for {}.{}.{}.{}\" .format(fake_ti.hostname, self.dag.dag_id, t.task_id, fake_execution_date.isoformat()), mock_hook.mapred_job_name) if 'AIRFLOW_RUNALL_TESTS'", "dag=self.dag) fake_execution_date = timezone.datetime(2018, 6, 19) fake_ti = TaskInstance(task=t, execution_date=fake_execution_date)", "specific language governing permissions and limitations # under the License.", "= \"SELECT ${num_col} FROM ${hiveconf:table};\" t = HiveOperator( hiveconf_jinja_translate=True, task_id='dry_run_basic_hql',", "table }};\") def test_hiveconf(self): hql = \"SELECT * FROM ${hiveconf:table}", "operators.sensors.HivePartitionSensor( task_id='hive_partition_check', table='airflow.static_babynames_partitioned', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_metastore_sql_sensor(self): t", "under the License is distributed on an # \"AS IS\"", "'start_date': DEFAULT_DATE} dag = DAG('test_dag_id', default_args=args) self.dag = dag self.hql", "end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_to_mysql(self): t = operators.hive_to_mysql.HiveToMySqlTransfer( mysql_conn_id='airflow_db', task_id='hive_to_mysql_check', create=True,", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either", "t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor_parses_partitions_with_periods(self): t = operators.sensors.NamedHivePartitionSensor.parse_partition_name( partition=\"schema.table/part1=this.can.be.an.issue/part2=ok\") self.assertEqual(t[0],", "1) DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat() DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10] class TestHiveEnvironment(unittest.TestCase): def", "TestHivePresto(TestHiveEnvironment): def test_hive(self): t = HiveOperator( task_id='basic_hql', hql=self.hql, dag=self.dag) t.run(start_date=DEFAULT_DATE,", "\"airflow.static_babynames_partitioned/ds={{ds}}\", \"airflow.static_babynames_partitioned/ds={{ds}}\" ], dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor_parses_partitions_with_periods(self): t", "test_named_hive_partition_sensor_times_out_on_nonexistent_partition(self): t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[ \"airflow.static_babynames_partitioned/ds={{ds}}\", \"airflow.static_babynames_partitioned/ds=nonexistent\" ], poke_interval=0.1,", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #", "PARTITION (${hiveconf:day});\" t = HiveOperator( hiveconfs={'table': 'static_babynames', 'day': '{{ ds", "from unittest import mock import nose from airflow import DAG,", "= HiveOperator( task_id='test_mapred_job_name', hql=self.hql, dag=self.dag) fake_execution_date = timezone.datetime(2018, 6, 19)", "dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor_parses_partitions_with_periods(self): t = operators.sensors.NamedHivePartitionSensor.parse_partition_name( partition=\"schema.table/part1=this.can.be.an.issue/part2=ok\")", "fake_execution_date.isoformat()), mock_hook.mapred_job_name) if 'AIRFLOW_RUNALL_TESTS' in os.environ: import airflow.hooks.hive_hooks import airflow.operators.presto_to_mysql", "EXISTS test_static_babynames;', 'CREATE TABLE test_static_babynames (name VARCHAR(500))', ], dag=self.dag) 
t.clear(start_date=DEFAULT_DATE,", "distributed with this work for additional information # regarding copyright", "test_hive_to_mysql(self): t = operators.hive_to_mysql.HiveToMySqlTransfer( mysql_conn_id='airflow_db', task_id='hive_to_mysql_check', create=True, sql=\"\"\" SELECT name", "self.assertEqual(t.hql, \"SELECT {{ num_col }} FROM {{ table }};\") def", "specific_mapred_queue) class HiveOperatorTest(TestHiveEnvironment): def test_hiveconf_jinja_translate(self): hql = \"SELECT ${num_col} FROM", "for the # specific language governing permissions and limitations #", "self.assertEqual( \"Airflow HiveOperator task for {}.{}.{}.{}\" .format(fake_ti.hostname, self.dag.dag_id, t.task_id, fake_execution_date.isoformat()),", "DEFAULT_DATE.isoformat() DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10] class TestHiveEnvironment(unittest.TestCase): def setUp(self): args =", "import DAG, configuration, operators from airflow.models import TaskInstance from airflow.operators.hive_operator", "governing permissions and limitations # under the License. import datetime", "FROM ${hiveconf:table} PARTITION (${hiveconf:day});\") @mock.patch('airflow.operators.hive_operator.HiveOperator.get_hook') def test_mapred_job_name(self, mock_get_hook): mock_hook =", "See the License for the # specific language governing permissions", "to in writing, # software distributed under the License is", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "t.hql, \"SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});\") @mock.patch('airflow.operators.hive_operator.HiveOperator.get_hook') def test_mapred_job_name(self,", "HiveOperator( task_id='test_mapred_job_name', hql=self.hql, dag=self.dag) fake_execution_date = timezone.datetime(2018, 6, 19) fake_ti", "hive_cli_conn_id='hive_cli_default', hql=self.hql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_presto(self): sql =", "t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[ \"airflow.static_babynames_partitioned/ds={{ds}}\", \"airflow.static_babynames_partitioned/ds=nonexistent\" ], poke_interval=0.1, timeout=1,", "# just check that the correct default value in test_default.cfg", "= configuration.conf.get( 'hive', 'default_hive_mapred_queue' ) self.assertEqual(t.get_hook().mapred_queue, test_config_hive_mapred_queue) def test_hive_airflow_default_config_queue_override(self): specific_mapred_queue", "airflow.operators.presto_to_mysql class TestHivePresto(TestHiveEnvironment): def test_hive(self): t = HiveOperator( task_id='basic_hql', hql=self.hql,", "def test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self): t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[ \"airflow.static_babynames_partitioned/ds={{ds}}\", \"airflow.static_babynames_partitioned/ds={{ds}}\" ],", "hook.conn = returner # Run result = hook._prepare_cli_cmd() # Verify", "file # distributed with this work for additional information #", "test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self): t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[ \"airflow.static_babynames_partitioned/ds={{ds}}\", \"airflow.static_babynames_partitioned/ds={{ds}}\" ], dag=self.dag)", "t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_partition_sensor(self): t = operators.sensors.HivePartitionSensor( task_id='hive_partition_check', 
table='airflow.static_babynames_partitioned',", "t = operators.sensors.HivePartitionSensor( task_id='hive_partition_check', table='airflow.static_babynames_partitioned', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def", "string, name string, gender string, num int) PARTITIONED BY (ds", "returner # Run result = hook._prepare_cli_cmd() # Verify self.assertIn('hive.server2.proxy.user=a_user_proxy', result[2])", "= operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[ \"airflow.static_babynames_partitioned/ds={{ds}}\", \"airflow.static_babynames_partitioned/ds={{ds}}\" ], dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,", "t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_dryrun(self): t = HiveOperator( task_id='dry_run_basic_hql', hql=self.hql,", "hql=self.hql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_presto(self): sql = \"\"\"", "airflow.static_babynames GROUP BY name \"\"\", mysql_table='test_static_babynames', mysql_preoperator='TRUNCATE TABLE test_static_babynames;', dag=self.dag)", "partition={'ds': DEFAULT_DATE_DS}, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor(self): t =", "DROP TABLE IF EXISTS static_babynames_partitioned; CREATE TABLE IF NOT EXISTS", "}};\") def test_hiveconf(self): hql = \"SELECT * FROM ${hiveconf:table} PARTITION", "partition_names=[ \"airflow.static_babynames_partitioned/ds={{ds}}\" ], dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self): t", "count(1) FROM airflow.static_babynames_partitioned; \"\"\" t = operators.presto_check_operator.PrestoCheckOperator( task_id='presto_check', sql=sql, dag=self.dag)", "args = {'owner': 'airflow', 'start_date': DEFAULT_DATE} dag = DAG('test_dag_id', default_args=args)", "dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self): t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check',", "dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor(self): t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check',", "task_id='presto_to_mysql_check', sql=\"\"\" SELECT name, count(*) as ccount FROM airflow.static_babynames GROUP", "mysql_conn_id='airflow_db', task_id='hive_to_mysql_check', create=True, sql=\"\"\" SELECT name FROM airflow.static_babynames LIMIT 100", "dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_presto_to_mysql(self): t = operators.presto_to_mysql.PrestoToMySqlTransfer( task_id='presto_to_mysql_check',", "t.dry_run() def test_beeline(self): t = HiveOperator( task_id='beeline_hql', hive_cli_conn_id='hive_cli_default', hql=self.hql, dag=self.dag)", "ds }}') SELECT state, year, name, gender, num FROM static_babynames;", "implied. 
See the License for the # specific language governing", "t = HiveOperator( task_id='basic_hql', hql=self.hql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def", "to you under the Apache License, Version 2.0 (the #", "configuration.conf.get( 'hive', 'default_hive_mapred_queue' ) self.assertEqual(t.get_hook().mapred_queue, test_config_hive_mapred_queue) def test_hive_airflow_default_config_queue_override(self): specific_mapred_queue =", "partition=\"schema.table/part1=this.can.be.an.issue/part2=ok\") self.assertEqual(t[0], \"schema\") self.assertEqual(t[1], \"table\") self.assertEqual(t[2], \"part1=this.can.be.an.issue/part2=this_should_be_ok\") @nose.tools.raises(airflow.exceptions.AirflowSensorTimeout) def test_named_hive_partition_sensor_times_out_on_nonexistent_partition(self):", "task_id='hive_partition_check', table='airflow.static_babynames_partitioned', partition_name='ds={}'.format(DEFAULT_DATE_DS), dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive2samba(self): t", "TaskInstance from airflow.operators.hive_operator import HiveOperator from airflow.utils import timezone DEFAULT_DATE", "USE airflow; DROP TABLE IF EXISTS static_babynames_partitioned; CREATE TABLE IF", "TaskInstance(task=t, execution_date=fake_execution_date) fake_ti.hostname = 'fake_hostname' fake_context = {'ti': fake_ti} t.execute(fake_context)", "may not use this file except in compliance # with", "HiveOperator( task_id='beeline_hql', hive_cli_conn_id='hive_cli_default', hql=self.hql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_presto(self):", "ignore_ti_state=True) def test_hive_dryrun(self): t = HiveOperator( task_id='dry_run_basic_hql', hql=self.hql, dag=self.dag) t.dry_run()", "mapred_job_name='airflow.test_default_config_queue', dag=self.dag) self.assertEqual(t.get_hook().mapred_queue, specific_mapred_queue) class HiveOperatorTest(TestHiveEnvironment): def test_hiveconf_jinja_translate(self): hql =", "= HiveOperator( task_id='beeline_hql', hive_cli_conn_id='hive_cli_default', hql=self.hql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def", "returner.extra_dejson = {'proxy_user': 'a_user_proxy'} hook.use_beeline = True hook.conn = returner", "os.environ[\"AIRFLOW__CORE__SECURITY\"] = \"kerberos\" def tearDown(self): del os.environ[\"AIRFLOW__CORE__SECURITY\"] def test_get_proxy_user_value(self): from", "FROM airflow.static_babynames LIMIT 1;\", dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_stats(self):", "hql=self.hql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_queues(self): t = HiveOperator(", "dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_presto(self): sql = \"\"\" SELECT", "= operators.sensors.HivePartitionSensor( task_id='hive_partition_check', table='airflow.static_babynames_partitioned', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_metastore_sql_sensor(self):", "'x' FROM airflow.static_babynames LIMIT 1;\", dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def", "used test_config_hive_mapred_queue = configuration.conf.get( 'hive', 'default_hive_mapred_queue' ) self.assertEqual(t.get_hook().mapred_queue, test_config_hive_mapred_queue) def", 
"self.assertEqual(t[1], \"table\") self.assertEqual(t[2], \"part1=this.can.be.an.issue/part2=this_should_be_ok\") @nose.tools.raises(airflow.exceptions.AirflowSensorTimeout) def test_named_hive_partition_sensor_times_out_on_nonexistent_partition(self): t = operators.sensors.NamedHivePartitionSensor(", "\"SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});\" t = HiveOperator( hiveconfs={'table':", "License, Version 2.0 (the # \"License\"); you may not use", "either express or implied. See the License for the #", "mock.MagicMock() returner.extra_dejson = {'proxy_user': 'a_user_proxy'} hook.use_beeline = True hook.conn =", "= mock_hook t = HiveOperator( task_id='test_mapred_job_name', hql=self.hql, dag=self.dag) fake_execution_date =", "test_hive2samba(self): t = operators.hive_to_samba_operator.Hive2SambaOperator( task_id='hive2samba_check', samba_conn_id='tableau_samba', hql=\"SELECT * FROM airflow.static_babynames", "test_mapred_job_name(self, mock_get_hook): mock_hook = mock.MagicMock() mock_get_hook.return_value = mock_hook t =", "value in test_default.cfg is used test_config_hive_mapred_queue = configuration.conf.get( 'hive', 'default_hive_mapred_queue'", "ignore_ti_state=True) def test_hive_to_mysql(self): t = operators.hive_to_mysql.HiveToMySqlTransfer( mysql_conn_id='airflow_db', task_id='hive_to_mysql_check', create=True, sql=\"\"\"", "= returner # Run result = hook._prepare_cli_cmd() # Verify self.assertIn('hive.server2.proxy.user=a_user_proxy',", "end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor(self): t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[ \"airflow.static_babynames_partitioned/ds={{ds}}\"", "${hiveconf:table};\" t = HiveOperator( hiveconf_jinja_translate=True, task_id='dry_run_basic_hql', hql=hql, dag=self.dag) t.prepare_template() self.assertEqual(t.hql,", "utf-8 -*- # # Licensed to the Apache Software Foundation", "LIMIT 1;\", dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_stats(self): t =", "( state string, year string, name string, gender string, num", "additional information # regarding copyright ownership. 
The ASF licenses this", "DEFAULT_DATE_ISO[:10] class TestHiveEnvironment(unittest.TestCase): def setUp(self): args = {'owner': 'airflow', 'start_date':", "= mock.MagicMock() returner.extra_dejson = {'proxy_user': 'a_user_proxy'} hook.use_beeline = True hook.conn", "See the NOTICE file # distributed with this work for", "t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_queues(self): t = HiveOperator( task_id='test_hive_queues', hql=self.hql,", "task_id='test_default_config_queue', hql=self.hql, mapred_queue_priority='HIGH', mapred_job_name='airflow.test_default_config_queue', dag=self.dag) # just check that the", "the correct default value in test_default.cfg is used test_config_hive_mapred_queue =", "timezone.datetime(2018, 6, 19) fake_ti = TaskInstance(task=t, execution_date=fake_execution_date) fake_ti.hostname = 'fake_hostname'", "\"table\") self.assertEqual(t[2], \"part1=this.can.be.an.issue/part2=this_should_be_ok\") @nose.tools.raises(airflow.exceptions.AirflowSensorTimeout) def test_named_hive_partition_sensor_times_out_on_nonexistent_partition(self): t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check',", "test_hdfs_sensor(self): t = operators.sensors.HdfsSensor( task_id='hdfs_sensor_check', filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "setUp(self): args = {'owner': 'airflow', 'start_date': DEFAULT_DATE} dag = DAG('test_dag_id',", "= HiveOperator( hiveconf_jinja_translate=True, task_id='dry_run_basic_hql', hql=hql, dag=self.dag) t.prepare_template() self.assertEqual(t.hql, \"SELECT {{", "HiveOperator( task_id='test_hive_queues', hql=self.hql, mapred_queue='default', mapred_queue_priority='HIGH', mapred_job_name='airflow.test_hive_queues', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "Apache License, Version 2.0 (the # \"License\"); you may not", "\"airflow.static_babynames_partitioned/ds={{ds}}\" ], dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor_parses_partitions_with_periods(self): t =", "hook._prepare_cli_cmd() # Verify self.assertIn('hive.server2.proxy.user=a_user_proxy', result[2]) class HiveOperatorConfigTest(TestHiveEnvironment): def test_hive_airflow_default_config_queue(self): t", "timeout=120, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_sql_sensor(self): t = operators.sensors.SqlSensor(", "t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_presto_to_mysql(self): t = operators.presto_to_mysql.PrestoToMySqlTransfer( task_id='presto_to_mysql_check', sql=\"\"\"", "mock import nose from airflow import DAG, configuration, operators from", "DEFAULT_DATE = datetime.datetime(2015, 1, 1) DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat() DEFAULT_DATE_DS =", "INSERT OVERWRITE TABLE static_babynames_partitioned PARTITION(ds='{{ ds }}') SELECT state, year,", "FROM ${hiveconf:table};\" t = HiveOperator( hiveconf_jinja_translate=True, task_id='dry_run_basic_hql', hql=hql, dag=self.dag) t.prepare_template()", "samba_conn_id='tableau_samba', hql=\"SELECT * FROM airflow.static_babynames LIMIT 10000\", destination_filepath='test_airflow.csv', dag=self.dag) t.run(start_date=DEFAULT_DATE,", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "= 
operators.presto_check_operator.PrestoCheckOperator( task_id='presto_check', sql=sql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_presto_to_mysql(self):", "file except in compliance # with the License. You may", "end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor_parses_partitions_with_periods(self): t = operators.sensors.NamedHivePartitionSensor.parse_partition_name( partition=\"schema.table/part1=this.can.be.an.issue/part2=ok\") self.assertEqual(t[0], \"schema\")", "# specific language governing permissions and limitations # under the", "hook.use_beeline = True hook.conn = returner # Run result =", "task_id='test_mapred_job_name', hql=self.hql, dag=self.dag) fake_execution_date = timezone.datetime(2018, 6, 19) fake_ti =", "test_hive_airflow_default_config_queue(self): t = HiveOperator( task_id='test_default_config_queue', hql=self.hql, mapred_queue_priority='HIGH', mapred_job_name='airflow.test_default_config_queue', dag=self.dag) #", "19) fake_ti = TaskInstance(task=t, execution_date=fake_execution_date) fake_ti.hostname = 'fake_hostname' fake_context =", "in test_default.cfg is used test_config_hive_mapred_queue = configuration.conf.get( 'hive', 'default_hive_mapred_queue' )", "def test_beeline(self): t = HiveOperator( task_id='beeline_hql', hive_cli_conn_id='hive_cli_default', hql=self.hql, dag=self.dag) t.run(start_date=DEFAULT_DATE,", "t = HiveOperator( task_id='dry_run_basic_hql', hql=self.hql, dag=self.dag) t.dry_run() def test_beeline(self): t", "timezone DEFAULT_DATE = datetime.datetime(2015, 1, 1) DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat() DEFAULT_DATE_DS", "DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat() DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10] class TestHiveEnvironment(unittest.TestCase): def setUp(self):", "os.environ[\"AIRFLOW__CORE__SECURITY\"] def test_get_proxy_user_value(self): from airflow.hooks.hive_hooks import HiveCliHook hook = HiveCliHook()", "you may not use this file except in compliance #", "filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames', timeout=120, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_sql_sensor(self): t =", "ignore_ti_state=True) def test_hive_stats(self): t = operators.hive_stats_operator.HiveStatsCollectionOperator( task_id='hive_stats_check', table=\"airflow.static_babynames_partitioned\", partition={'ds': DEFAULT_DATE_DS},", "the License. import datetime import os import unittest from unittest", "fake_context = {'ti': fake_ti} t.execute(fake_context) self.assertEqual( \"Airflow HiveOperator task for", "], dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self): t = operators.sensors.NamedHivePartitionSensor(", "and limitations # under the License. import datetime import os", "use this file except in compliance # with the License.", "t = HiveOperator( task_id='test_default_config_queue', hql=self.hql, mapred_queue=specific_mapred_queue, mapred_queue_priority='HIGH', mapred_job_name='airflow.test_default_config_queue', dag=self.dag) self.assertEqual(t.get_hook().mapred_queue,", "100 \"\"\", mysql_table='test_static_babynames', mysql_preoperator=[ 'DROP TABLE IF EXISTS test_static_babynames;', 'CREATE", "contributor license agreements. 
See the NOTICE file # distributed with", "from airflow.models import TaskInstance from airflow.operators.hive_operator import HiveOperator from airflow.utils", "task_id='hive_partition_check', partition_names=[ \"airflow.static_babynames_partitioned/ds={{ds}}\" ], dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self):", "sql=sql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_presto_to_mysql(self): t = operators.presto_to_mysql.PrestoToMySqlTransfer(", "dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_queues(self): t = HiveOperator( task_id='test_hive_queues',", "DEFAULT_DATE_DS}, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor(self): t = operators.sensors.NamedHivePartitionSensor(", "self.assertEqual(t[2], \"part1=this.can.be.an.issue/part2=this_should_be_ok\") @nose.tools.raises(airflow.exceptions.AirflowSensorTimeout) def test_named_hive_partition_sensor_times_out_on_nonexistent_partition(self): t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[", "mapred_queue_priority='HIGH', mapred_job_name='airflow.test_default_config_queue', dag=self.dag) # just check that the correct default", "destination_filepath='test_airflow.csv', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_to_mysql(self): t = operators.hive_to_mysql.HiveToMySqlTransfer(", "mapred_queue=specific_mapred_queue, mapred_queue_priority='HIGH', mapred_job_name='airflow.test_default_config_queue', dag=self.dag) self.assertEqual(t.get_hook().mapred_queue, specific_mapred_queue) class HiveOperatorTest(TestHiveEnvironment): def test_hiveconf_jinja_translate(self):", "\"schema\") self.assertEqual(t[1], \"table\") self.assertEqual(t[2], \"part1=this.can.be.an.issue/part2=this_should_be_ok\") @nose.tools.raises(airflow.exceptions.AirflowSensorTimeout) def test_named_hive_partition_sensor_times_out_on_nonexistent_partition(self): t =", "mysql_table='test_static_babynames', mysql_preoperator=[ 'DROP TABLE IF EXISTS test_static_babynames;', 'CREATE TABLE test_static_babynames", "string, year string, name string, gender string, num int) PARTITIONED", "1, 1) DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat() DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10] class TestHiveEnvironment(unittest.TestCase):", "string, num int) PARTITIONED BY (ds string); INSERT OVERWRITE TABLE", "hiveconfs={'table': 'static_babynames', 'day': '{{ ds }}'}, task_id='dry_run_basic_hql', hql=hql, dag=self.dag) t.prepare_template()", "an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "hql=self.hql, dag=self.dag) t.dry_run() def test_beeline(self): t = HiveOperator( task_id='beeline_hql', hive_cli_conn_id='hive_cli_default',", "end_date=DEFAULT_DATE, ignore_ti_state=True) def test_presto_to_mysql(self): t = operators.presto_to_mysql.PrestoToMySqlTransfer( task_id='presto_to_mysql_check', sql=\"\"\" SELECT", "fake_execution_date = timezone.datetime(2018, 6, 19) fake_ti = TaskInstance(task=t, execution_date=fake_execution_date) fake_ti.hostname", "WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express", "mock_get_hook.return_value = mock_hook t = HiveOperator( task_id='test_mapred_job_name', hql=self.hql, dag=self.dag) fake_execution_date", "with this work for additional 
information # regarding copyright ownership.", "import HiveCliHook hook = HiveCliHook() returner = mock.MagicMock() returner.extra_dejson =", "t = operators.sensors.SqlSensor( task_id='hdfs_sensor_check', conn_id='presto_default', sql=\"SELECT 'x' FROM airflow.static_babynames LIMIT", "fake_ti = TaskInstance(task=t, execution_date=fake_execution_date) fake_ti.hostname = 'fake_hostname' fake_context = {'ti':", "correct default value in test_default.cfg is used test_config_hive_mapred_queue = configuration.conf.get(", "test_hive_dryrun(self): t = HiveOperator( task_id='dry_run_basic_hql', hql=self.hql, dag=self.dag) t.dry_run() def test_beeline(self):", "t = HiveOperator( task_id='test_hive_queues', hql=self.hql, mapred_queue='default', mapred_queue_priority='HIGH', mapred_job_name='airflow.test_hive_queues', dag=self.dag) t.run(start_date=DEFAULT_DATE,", "from airflow.hooks.hive_hooks import HiveCliHook hook = HiveCliHook() returner = mock.MagicMock()", "ignore_ti_state=True) def test_hive_metastore_sql_sensor(self): t = operators.sensors.MetastorePartitionSensor( task_id='hive_partition_check', table='airflow.static_babynames_partitioned', partition_name='ds={}'.format(DEFAULT_DATE_DS), dag=self.dag)", "\"Airflow HiveOperator task for {}.{}.{}.{}\" .format(fake_ti.hostname, self.dag.dag_id, t.task_id, fake_execution_date.isoformat()), mock_hook.mapred_job_name)", "* FROM airflow.static_babynames LIMIT 10000\", destination_filepath='test_airflow.csv', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "ignore_ti_state=True) def test_named_hive_partition_sensor(self): t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[ \"airflow.static_babynames_partitioned/ds={{ds}}\" ],", "def tearDown(self): del os.environ[\"AIRFLOW__CORE__SECURITY\"] def test_get_proxy_user_value(self): from airflow.hooks.hive_hooks import HiveCliHook", "end_date=DEFAULT_DATE, ignore_ti_state=True) def test_webhdfs_sensor(self): t = operators.sensors.WebHdfsSensor( task_id='webhdfs_sensor_check', filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames', timeout=120,", "work for additional information # regarding copyright ownership. 
The ASF", "ignore_ti_state=True) def test_webhdfs_sensor(self): t = operators.sensors.WebHdfsSensor( task_id='webhdfs_sensor_check', filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames', timeout=120, dag=self.dag)", "Verify self.assertIn('hive.server2.proxy.user=a_user_proxy', result[2]) class HiveOperatorConfigTest(TestHiveEnvironment): def test_hive_airflow_default_config_queue(self): t = HiveOperator(", "= HiveOperator( task_id='test_default_config_queue', hql=self.hql, mapred_queue_priority='HIGH', mapred_job_name='airflow.test_default_config_queue', dag=self.dag) # just check", "t = operators.sensors.HdfsSensor( task_id='hdfs_sensor_check', filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def", "distributed under the License is distributed on an # \"AS", "PARTITIONED BY (ds string); INSERT OVERWRITE TABLE static_babynames_partitioned PARTITION(ds='{{ ds", "def test_hive_stats(self): t = operators.hive_stats_operator.HiveStatsCollectionOperator( task_id='hive_stats_check', table=\"airflow.static_babynames_partitioned\", partition={'ds': DEFAULT_DATE_DS}, dag=self.dag)", "\"\"\" SELECT count(1) FROM airflow.static_babynames_partitioned; \"\"\" t = operators.presto_check_operator.PrestoCheckOperator( task_id='presto_check',", "EXISTS static_babynames_partitioned ( state string, year string, name string, gender", "PARTITION(ds='{{ ds }}') SELECT state, year, name, gender, num FROM", "ignore_ti_state=True) def test_presto_to_mysql(self): t = operators.presto_to_mysql.PrestoToMySqlTransfer( task_id='presto_to_mysql_check', sql=\"\"\" SELECT name,", "def setUp(self): args = {'owner': 'airflow', 'start_date': DEFAULT_DATE} dag =", "# software distributed under the License is distributed on an", "t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[ \"airflow.static_babynames_partitioned/ds={{ds}}\" ], dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,", "name, gender, num FROM static_babynames; \"\"\" class TestHiveCli(unittest.TestCase): def setUp(self):", "airflow import DAG, configuration, operators from airflow.models import TaskInstance from", "airflow; DROP TABLE IF EXISTS static_babynames_partitioned; CREATE TABLE IF NOT", "{}.{}.{}.{}\" .format(fake_ti.hostname, self.dag.dag_id, t.task_id, fake_execution_date.isoformat()), mock_hook.mapred_job_name) if 'AIRFLOW_RUNALL_TESTS' in os.environ:", "the License. 
You may obtain a copy of the License", "mapred_queue_priority='HIGH', mapred_job_name='airflow.test_hive_queues', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_dryrun(self): t =", "mysql_table='test_static_babynames', mysql_preoperator='TRUNCATE TABLE test_static_babynames;', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hdfs_sensor(self):", "for {}.{}.{}.{}\" .format(fake_ti.hostname, self.dag.dag_id, t.task_id, fake_execution_date.isoformat()), mock_hook.mapred_job_name) if 'AIRFLOW_RUNALL_TESTS' in", "HiveOperator( task_id='dry_run_basic_hql', hql=self.hql, dag=self.dag) t.dry_run() def test_beeline(self): t = HiveOperator(", "'fake_hostname' fake_context = {'ti': fake_ti} t.execute(fake_context) self.assertEqual( \"Airflow HiveOperator task", "SELECT state, year, name, gender, num FROM static_babynames; \"\"\" class", "{'proxy_user': 'a_user_proxy'} hook.use_beeline = True hook.conn = returner # Run", "mapred_queue_priority='HIGH', mapred_job_name='airflow.test_default_config_queue', dag=self.dag) self.assertEqual(t.get_hook().mapred_queue, specific_mapred_queue) class HiveOperatorTest(TestHiveEnvironment): def test_hiveconf_jinja_translate(self): hql", "operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[ \"airflow.static_babynames_partitioned/ds={{ds}}\", \"airflow.static_babynames_partitioned/ds={{ds}}\" ], dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "import timezone DEFAULT_DATE = datetime.datetime(2015, 1, 1) DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()", "task_id='hive_to_mysql_check', create=True, sql=\"\"\" SELECT name FROM airflow.static_babynames LIMIT 100 \"\"\",", "under the Apache License, Version 2.0 (the # \"License\"); you", "], poke_interval=0.1, timeout=1, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_partition_sensor(self): t", "<reponame>Ryan-Miao/airflow<gh_stars>0 # -*- coding: utf-8 -*- # # Licensed to", "distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "regarding copyright ownership. 
The ASF licenses this file # to", "or agreed to in writing, # software distributed under the", "import mock import nose from airflow import DAG, configuration, operators", "num int) PARTITIONED BY (ds string); INSERT OVERWRITE TABLE static_babynames_partitioned", "BY (ds string); INSERT OVERWRITE TABLE static_babynames_partitioned PARTITION(ds='{{ ds }}')", "name, count(*) as ccount FROM airflow.static_babynames GROUP BY name \"\"\",", "TABLE IF EXISTS static_babynames_partitioned; CREATE TABLE IF NOT EXISTS static_babynames_partitioned", "hiveconf_jinja_translate=True, task_id='dry_run_basic_hql', hql=hql, dag=self.dag) t.prepare_template() self.assertEqual(t.hql, \"SELECT {{ num_col }}", "task_id='hive_partition_check', table='airflow.static_babynames_partitioned', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_metastore_sql_sensor(self): t =", "t = HiveOperator( hiveconf_jinja_translate=True, task_id='dry_run_basic_hql', hql=hql, dag=self.dag) t.prepare_template() self.assertEqual(t.hql, \"SELECT", "dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_partition_sensor(self): t = operators.sensors.HivePartitionSensor( task_id='hive_partition_check',", "class HiveOperatorConfigTest(TestHiveEnvironment): def test_hive_airflow_default_config_queue(self): t = HiveOperator( task_id='test_default_config_queue', hql=self.hql, mapred_queue_priority='HIGH',", "= operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[ \"airflow.static_babynames_partitioned/ds={{ds}}\" ], dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "self.hql = \"\"\" USE airflow; DROP TABLE IF EXISTS static_babynames_partitioned;", "mapred_job_name='airflow.test_default_config_queue', dag=self.dag) # just check that the correct default value", "= \"SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});\" t = HiveOperator(", "create=True, sql=\"\"\" SELECT name FROM airflow.static_babynames LIMIT 100 \"\"\", mysql_table='test_static_babynames',", "= {'ti': fake_ti} t.execute(fake_context) self.assertEqual( \"Airflow HiveOperator task for {}.{}.{}.{}\"", "{{ num_col }} FROM {{ table }};\") def test_hiveconf(self): hql", "import datetime import os import unittest from unittest import mock", "or more contributor license agreements. See the NOTICE file #", "permissions and limitations # under the License. import datetime import", "fake_ti} t.execute(fake_context) self.assertEqual( \"Airflow HiveOperator task for {}.{}.{}.{}\" .format(fake_ti.hostname, self.dag.dag_id,", "in os.environ: import airflow.hooks.hive_hooks import airflow.operators.presto_to_mysql class TestHivePresto(TestHiveEnvironment): def test_hive(self):", "gender string, num int) PARTITIONED BY (ds string); INSERT OVERWRITE", "FROM airflow.static_babynames GROUP BY name \"\"\", mysql_table='test_static_babynames', mysql_preoperator='TRUNCATE TABLE test_static_babynames;',", "test_hiveconf_jinja_translate(self): hql = \"SELECT ${num_col} FROM ${hiveconf:table};\" t = HiveOperator(", "this work for additional information # regarding copyright ownership. 
The", "sql = \"\"\" SELECT count(1) FROM airflow.static_babynames_partitioned; \"\"\" t =", "is used test_config_hive_mapred_queue = configuration.conf.get( 'hive', 'default_hive_mapred_queue' ) self.assertEqual(t.get_hook().mapred_queue, test_config_hive_mapred_queue)", "the NOTICE file # distributed with this work for additional", "airflow.hooks.hive_hooks import HiveCliHook hook = HiveCliHook() returner = mock.MagicMock() returner.extra_dejson", "name string, gender string, num int) PARTITIONED BY (ds string);", "ccount FROM airflow.static_babynames GROUP BY name \"\"\", mysql_table='test_static_babynames', mysql_preoperator='TRUNCATE TABLE", "mock_hook t = HiveOperator( task_id='test_mapred_job_name', hql=self.hql, dag=self.dag) fake_execution_date = timezone.datetime(2018,", "just check that the correct default value in test_default.cfg is", "under the License. import datetime import os import unittest from", "HiveOperator( hiveconfs={'table': 'static_babynames', 'day': '{{ ds }}'}, task_id='dry_run_basic_hql', hql=hql, dag=self.dag)", "t = operators.sensors.MetastorePartitionSensor( task_id='hive_partition_check', table='airflow.static_babynames_partitioned', partition_name='ds={}'.format(DEFAULT_DATE_DS), dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "test_presto(self): sql = \"\"\" SELECT count(1) FROM airflow.static_babynames_partitioned; \"\"\" t", "if 'AIRFLOW_RUNALL_TESTS' in os.environ: import airflow.hooks.hive_hooks import airflow.operators.presto_to_mysql class TestHivePresto(TestHiveEnvironment):", "t.prepare_template() self.assertEqual( t.hql, \"SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});\") @mock.patch('airflow.operators.hive_operator.HiveOperator.get_hook')", "TABLE test_static_babynames (name VARCHAR(500))', ], dag=self.dag) t.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,", "t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive2samba(self): t = operators.hive_to_samba_operator.Hive2SambaOperator( task_id='hive2samba_check', samba_conn_id='tableau_samba',", "def test_mapred_job_name(self, mock_get_hook): mock_hook = mock.MagicMock() mock_get_hook.return_value = mock_hook t", "dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hdfs_sensor(self): t = operators.sensors.HdfsSensor( task_id='hdfs_sensor_check',", "end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_queues(self): t = HiveOperator( task_id='test_hive_queues', hql=self.hql, mapred_queue='default',", "NOT EXISTS static_babynames_partitioned ( state string, year string, name string,", "# Run result = hook._prepare_cli_cmd() # Verify self.assertIn('hive.server2.proxy.user=a_user_proxy', result[2]) class", "num_col }} FROM {{ table }};\") def test_hiveconf(self): hql =", "operators.sensors.SqlSensor( task_id='hdfs_sensor_check', conn_id='presto_default', sql=\"SELECT 'x' FROM airflow.static_babynames LIMIT 1;\", dag=self.dag)", "\"\"\" class TestHiveCli(unittest.TestCase): def setUp(self): self.nondefault_schema = \"nondefault\" os.environ[\"AIRFLOW__CORE__SECURITY\"] =", "def test_hiveconf_jinja_translate(self): hql = \"SELECT ${num_col} FROM ${hiveconf:table};\" t =", "IF EXISTS static_babynames_partitioned; CREATE TABLE IF NOT EXISTS static_babynames_partitioned (", "# Verify self.assertIn('hive.server2.proxy.user=a_user_proxy', result[2]) class HiveOperatorConfigTest(TestHiveEnvironment): def 
" ]

# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import datetime
import os
import unittest
from unittest import mock

import nose

from airflow import DAG, configuration, operators
from airflow.models import TaskInstance
from airflow.operators.hive_operator import HiveOperator
from airflow.utils import timezone

DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]


class TestHiveEnvironment(unittest.TestCase):

    def setUp(self):
        args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        dag = DAG('test_dag_id', default_args=args)
        self.dag = dag
        self.hql = """
        USE airflow;
        DROP TABLE IF EXISTS static_babynames_partitioned;
        CREATE TABLE IF NOT EXISTS static_babynames_partitioned (
            state string,
            year string,
            name string,
            gender string,
            num int)
        PARTITIONED BY (ds string);
        INSERT OVERWRITE TABLE static_babynames_partitioned
            PARTITION(ds='{{ ds }}')
        SELECT state, year, name, gender, num FROM static_babynames;
        """


class TestHiveCli(unittest.TestCase):

    def setUp(self):
        self.nondefault_schema = "nondefault"
        os.environ["AIRFLOW__CORE__SECURITY"] = "kerberos"

    def tearDown(self):
        del os.environ["AIRFLOW__CORE__SECURITY"]

    def test_get_proxy_user_value(self):
        from airflow.hooks.hive_hooks import HiveCliHook

        hook = HiveCliHook()
        returner = mock.MagicMock()
        returner.extra_dejson = {'proxy_user': 'a_user_proxy'}
        hook.use_beeline = True
        hook.conn = returner

        # Run
        result = hook._prepare_cli_cmd()

        # Verify
        self.assertIn('hive.server2.proxy.user=a_user_proxy', result[2])


class HiveOperatorConfigTest(TestHiveEnvironment):

    def test_hive_airflow_default_config_queue(self):
        t = HiveOperator(
            task_id='test_default_config_queue',
            hql=self.hql,
            mapred_queue_priority='HIGH',
            mapred_job_name='airflow.test_default_config_queue',
            dag=self.dag)

        # just check that the correct default value in test_default.cfg is used
        test_config_hive_mapred_queue = configuration.conf.get(
            'hive',
            'default_hive_mapred_queue'
        )
        self.assertEqual(t.get_hook().mapred_queue, test_config_hive_mapred_queue)

    def test_hive_airflow_default_config_queue_override(self):
        specific_mapred_queue = 'default'
        t = HiveOperator(
            task_id='test_default_config_queue',
            hql=self.hql,
            mapred_queue=specific_mapred_queue,
            mapred_queue_priority='HIGH',
            mapred_job_name='airflow.test_default_config_queue',
            dag=self.dag)

        self.assertEqual(t.get_hook().mapred_queue, specific_mapred_queue)


class HiveOperatorTest(TestHiveEnvironment):

    def test_hiveconf_jinja_translate(self):
        hql = "SELECT ${num_col} FROM ${hiveconf:table};"
        t = HiveOperator(
            hiveconf_jinja_translate=True,
            task_id='dry_run_basic_hql', hql=hql, dag=self.dag)
        t.prepare_template()
        self.assertEqual(t.hql, "SELECT {{ num_col }} FROM {{ table }};")

    def test_hiveconf(self):
        hql = "SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});"
        t = HiveOperator(
            hiveconfs={'table': 'static_babynames', 'day': '{{ ds }}'},
            task_id='dry_run_basic_hql', hql=hql, dag=self.dag)
        t.prepare_template()
        self.assertEqual(
            t.hql,
            "SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});")

    @mock.patch('airflow.operators.hive_operator.HiveOperator.get_hook')
    def test_mapred_job_name(self, mock_get_hook):
        mock_hook = mock.MagicMock()
        mock_get_hook.return_value = mock_hook
        t = HiveOperator(
            task_id='test_mapred_job_name',
            hql=self.hql,
            dag=self.dag)

        fake_execution_date = timezone.datetime(2018, 6, 19)
        fake_ti = TaskInstance(task=t, execution_date=fake_execution_date)
        fake_ti.hostname = 'fake_hostname'
        fake_context = {'ti': fake_ti}

        t.execute(fake_context)
        self.assertEqual(
            "Airflow HiveOperator task for {}.{}.{}.{}"
            .format(fake_ti.hostname,
                    self.dag.dag_id, t.task_id,
                    fake_execution_date.isoformat()),
            mock_hook.mapred_job_name)


if 'AIRFLOW_RUNALL_TESTS' in os.environ:

    import airflow.hooks.hive_hooks
    import airflow.operators.presto_to_mysql

    class TestHivePresto(TestHiveEnvironment):

        def test_hive(self):
            t = HiveOperator(
                task_id='basic_hql', hql=self.hql, dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hive_queues(self):
            t = HiveOperator(
                task_id='test_hive_queues', hql=self.hql,
                mapred_queue='default', mapred_queue_priority='HIGH',
                mapred_job_name='airflow.test_hive_queues',
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hive_dryrun(self):
            t = HiveOperator(
                task_id='dry_run_basic_hql', hql=self.hql, dag=self.dag)
            t.dry_run()

        def test_beeline(self):
            t = HiveOperator(
                task_id='beeline_hql', hive_cli_conn_id='hive_cli_default',
                hql=self.hql, dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_presto(self):
            sql = """
            SELECT count(1) FROM airflow.static_babynames_partitioned;
            """
            t = operators.presto_check_operator.PrestoCheckOperator(
                task_id='presto_check', sql=sql, dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_presto_to_mysql(self):
            t = operators.presto_to_mysql.PrestoToMySqlTransfer(
                task_id='presto_to_mysql_check',
                sql="""
                SELECT name, count(*) as ccount
                FROM airflow.static_babynames
                GROUP BY name
                """,
                mysql_table='test_static_babynames',
                mysql_preoperator='TRUNCATE TABLE test_static_babynames;',
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hdfs_sensor(self):
            t = operators.sensors.HdfsSensor(
                task_id='hdfs_sensor_check',
                filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames',
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_webhdfs_sensor(self):
            t = operators.sensors.WebHdfsSensor(
                task_id='webhdfs_sensor_check',
                filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames',
                timeout=120,
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_sql_sensor(self):
            t = operators.sensors.SqlSensor(
                task_id='hdfs_sensor_check',
                conn_id='presto_default',
                sql="SELECT 'x' FROM airflow.static_babynames LIMIT 1;",
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hive_stats(self):
            t = operators.hive_stats_operator.HiveStatsCollectionOperator(
                task_id='hive_stats_check',
                table="airflow.static_babynames_partitioned",
                partition={'ds': DEFAULT_DATE_DS},
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_named_hive_partition_sensor(self):
            t = operators.sensors.NamedHivePartitionSensor(
                task_id='hive_partition_check',
                partition_names=[
                    "airflow.static_babynames_partitioned/ds={{ds}}"
                ],
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self):
            t = operators.sensors.NamedHivePartitionSensor(
                task_id='hive_partition_check',
                partition_names=[
                    "airflow.static_babynames_partitioned/ds={{ds}}",
                    "airflow.static_babynames_partitioned/ds={{ds}}"
                ],
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_named_hive_partition_sensor_parses_partitions_with_periods(self):
            t = operators.sensors.NamedHivePartitionSensor.parse_partition_name(
                partition="schema.table/part1=this.can.be.an.issue/part2=ok")
            self.assertEqual(t[0], "schema")
            self.assertEqual(t[1], "table")
            self.assertEqual(t[2], "part1=this.can.be.an.issue/part2=this_should_be_ok")

        @nose.tools.raises(airflow.exceptions.AirflowSensorTimeout)
        def test_named_hive_partition_sensor_times_out_on_nonexistent_partition(self):
            t = operators.sensors.NamedHivePartitionSensor(
                task_id='hive_partition_check',
                partition_names=[
                    "airflow.static_babynames_partitioned/ds={{ds}}",
                    "airflow.static_babynames_partitioned/ds=nonexistent"
                ],
                poke_interval=0.1,
                timeout=1,
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hive_partition_sensor(self):
            t = operators.sensors.HivePartitionSensor(
                task_id='hive_partition_check',
                table='airflow.static_babynames_partitioned',
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hive_metastore_sql_sensor(self):
            t = operators.sensors.MetastorePartitionSensor(
                task_id='hive_partition_check',
                table='airflow.static_babynames_partitioned',
                partition_name='ds={}'.format(DEFAULT_DATE_DS),
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hive2samba(self):
            t = operators.hive_to_samba_operator.Hive2SambaOperator(
                task_id='hive2samba_check',
                samba_conn_id='tableau_samba',
                hql="SELECT * FROM airflow.static_babynames LIMIT 10000",
                destination_filepath='test_airflow.csv',
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hive_to_mysql(self):
            t = operators.hive_to_mysql.HiveToMySqlTransfer(
                mysql_conn_id='airflow_db',
                task_id='hive_to_mysql_check',
                create=True,
                sql="""
                SELECT name
                FROM airflow.static_babynames
                LIMIT 100
                """,
                mysql_table='test_static_babynames',
                mysql_preoperator=[
                    'DROP TABLE IF EXISTS test_static_babynames;',
                    'CREATE TABLE test_static_babynames (name VARCHAR(500))',
                ],
                dag=self.dag)
            t.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)
[ "m.mode == \"action\": window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30) ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25])", "[m.rect.left+27, m.rect.top+47]) ui.Text(\"Max Carts: \" + str(m.hoveritem.data[\"max\"]), [m.rect.left+27, m.rect.top+60]) if", "m, listmap) if not m.hoveritem == None and not m.mode", "== \"game\": if pygame.sprite.spritecollide(m, carts, False) and m.mode == \"select\":", "return [int(math.floor(pos[0] / 40)), int(math.floor(pos[1] / 40))] def loadlevel(number): global", "m.rect.top+25]) except: ui.Text(m.clickedcart.type.upper(), [m.rect.left+27, m.rect.top+25]) if listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: ui.Resize(22)", "if event.key == pygame.K_SPACE: carts.add(map.Cart(snaptogrid(m.tl), \"miner\")) if screen == \"game\":", "600]) ui.window = window screen = \"game\" s = {\"fullscreen\":", "carts: ui.Resize(18) ui.Text(\"Carts Inside: \" + str(m.hoveritem.data[\"carts\"]), [m.rect.left+27, m.rect.top+47]) ui.Text(\"Max", "= pygame.image.load(\"./resources/images/selected2.png\") box = pygame.image.load(\"./resources/images/box.png\") uibox = pygame.image.load(\"./resources/images/ui box.png\") class", "listmap) if m.clickedcart != None: m.mode = \"action\" elif m.mode", "== \"action\": window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30) ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) if", "= 1 gamedata[\"copper\"] = 0 loadlevel(0) while running: for event", "elif event.type == pygame.MOUSEBUTTONDOWN: m.pos(pygame.mouse.get_pos()) if screen == \"game\": if", "\"game\" s = {\"fullscreen\": False} running = True gamedata =", "listmap = [] clock = pygame.time.Clock() selected = pygame.image.load(\"./resources/images/selected.png\") selected2", "0, \"iron\": 1, \"copper\":0} tiles = pygame.sprite.Group() rails = pygame.sprite.Group()", "= None self.hoveritem = None self.tl = self.rect.topleft self.mode =", "= None if len(pygame.sprite.spritecollide(m, carts, False)) > 0: m.hoveritem =", "== \"action\" and m.clickedcart != None and listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0:", "= [] clock = pygame.time.Clock() selected = pygame.image.load(\"./resources/images/selected.png\") selected2 =", "\" + str(m.hoveritem.data[\"carts\"]), [m.rect.left+27, m.rect.top+47]) ui.Text(\"Max Carts: \" + str(m.hoveritem.data[\"max\"]),", "1, \"copper\":0} tiles = pygame.sprite.Group() rails = pygame.sprite.Group() carts =", "Here\", [m.rect.left+27, m.rect.top+60]) window.blit(selected, [snaptogrid(m.tl)[0]*40-2, snaptogrid(m.tl)[1]*40-2]) window.blit(uibox, [555, 475]) pygame.display.flip()", "ui.Text(\"Click to move\", [m.rect.left+27, m.rect.top+45]) ui.Text(\"Cart Here\", [m.rect.left+27, m.rect.top+60]) window.blit(selected,", "None: m.mode = \"action\" elif m.mode == \"action\" and m.clickedcart", "self.tl = self.rect.topleft self.mode = \"select\" def pos(self, position): self.rect.topleft", "gamedata[\"level\"] = number gamedata[\"coal\"] = 0 gamedata[\"iron\"] = 1 gamedata[\"copper\"]", "/ 40))] def loadlevel(number): global tiles, rails, carts, gamedata, listmap,", "len(pygame.sprite.spritecollide(m, interactables, False)) > 0: m.hoveritem = pygame.sprite.spritecollide(m, interactables, False)[0]", "listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: m.clickedcart.pathfind(listmap, snaptogrid(m.tl)) m.clickedcart = None m.mode =", "if pygame.sprite.spritecollide(m, carts, False) and m.mode == \"select\": carts.update(\"select\", m,", "== \"action\": window.blit(box, [m.rect.left+10, 
m.rect.top+10]) ui.Resize(30) try: ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25])", "[m.rect.left+27, m.rect.top+25]) if m.hoveritem.type.startswith(\"mine\") and m.hoveritem not in carts: ui.Resize(18)", "str(m.hoveritem.data[\"max\"]), [m.rect.left+27, m.rect.top+60]) if not m.clickedcart == None: window.blit(selected2, [m.clickedcart.rect.left-2,", "self.rect.topleft self.mode = \"select\" def pos(self, position): self.rect.topleft = position", "= Mouse() def snaptogrid(pos): return [int(math.floor(pos[0] / 40)), int(math.floor(pos[1] /", "def __init__(self): pygame.sprite.Sprite.__init__(self) self.image = pygame.surface.Surface([1, 1]) self.rect = self.image.get_rect()", "len(pygame.sprite.spritecollide(m, carts, False)) > 0: m.hoveritem = pygame.sprite.spritecollide(m, carts, False)[0]", "None and listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: m.clickedcart.pathfind(listmap, snaptogrid(m.tl)) m.clickedcart = None", "pos(self, position): self.rect.topleft = position self.tl = self.rect.topleft m =", "= 0 gamedata[\"iron\"] = 1 gamedata[\"copper\"] = 0 loadlevel(0) while", "+ str(m.hoveritem.data[\"carts\"]), [m.rect.left+27, m.rect.top+47]) ui.Text(\"Max Carts: \" + str(m.hoveritem.data[\"max\"]), [m.rect.left+27,", "tiles.draw(window) carts.draw(window) carts.update(\"update\", m, listmap) if not m.hoveritem == None", "ui.Text(\"Cart Here\", [m.rect.left+27, m.rect.top+60]) window.blit(selected, [snaptogrid(m.tl)[0]*40-2, snaptogrid(m.tl)[1]*40-2]) window.blit(uibox, [555, 475])", "carts.empty() gamedata[\"level\"] = number gamedata[\"coal\"] = 0 gamedata[\"iron\"] = 1", "ui.Resize(22) ui.Text(\"Click to move\", [m.rect.left+27, m.rect.top+45]) ui.Text(\"Cart Here\", [m.rect.left+27, m.rect.top+60])", "ui window = pygame.display.set_mode([800, 600]) ui.window = window screen =", "\"action\" elif m.mode == \"action\" and m.clickedcart != None and", "pygame.surface.Surface([1, 1]) self.rect = self.image.get_rect() self.rect.topleft = [0, 0] self.clickedcart", "ui.window = window screen = \"game\" s = {\"fullscreen\": False}", "\"game\": window.fill([100, 100, 100]) tiles.draw(window) carts.draw(window) carts.update(\"update\", m, listmap) if", "> 0: m.clickedcart.pathfind(listmap, snaptogrid(m.tl)) m.clickedcart = None m.mode = \"select\"", "if m.mode == \"action\": window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30) try: ui.Text(m.hoveritem.type.upper(),", "True gamedata = {\"level\": 0, \"coal\": 0, \"iron\": 1, \"copper\":0}", "m.clickedcart = None m.mode = \"select\" elif event.type == pygame.MOUSEMOTION:", "map.loadmap(int(number)) carts.empty() gamedata[\"level\"] = number gamedata[\"coal\"] = 0 gamedata[\"iron\"] =", "tiles, rails, carts, gamedata, listmap, interactables tiles, rails, interactables, listmap", "40))] def loadlevel(number): global tiles, rails, carts, gamedata, listmap, interactables", "0] self.clickedcart = None self.hoveritem = None self.tl = self.rect.topleft", "not m.mode == \"action\": window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30) ui.Text(m.hoveritem.type.upper(), [m.rect.left+27,", "= 0 loadlevel(0) while running: for event in pygame.event.get(): if", "> 0: m.hoveritem = pygame.sprite.spritecollide(m, interactables, False)[0] elif event.type ==", "== None and not m.mode == \"action\": window.blit(box, [m.rect.left+10, m.rect.top+10])", "\"game\": if pygame.sprite.spritecollide(m, carts, False) and m.mode == \"select\": carts.update(\"select\",", "ui.Resize(30) try: ui.Text(m.hoveritem.type.upper(), 
[m.rect.left+27, m.rect.top+25]) except: ui.Text(m.clickedcart.type.upper(), [m.rect.left+27, m.rect.top+25]) if", "= pygame.display.set_mode([800, 600]) ui.window = window screen = \"game\" s", "m.clickedcart != None: m.mode = \"action\" elif m.mode == \"action\"", "screen = \"game\" s = {\"fullscreen\": False} running = True", "listmap) if not m.hoveritem == None and not m.mode ==", "= [0, 0] self.clickedcart = None self.hoveritem = None self.tl", "interactables tiles, rails, interactables, listmap = map.loadmap(int(number)) carts.empty() gamedata[\"level\"] =", "import pygame, math from game import map, ui window =", "global tiles, rails, carts, gamedata, listmap, interactables tiles, rails, interactables,", "!= None and listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: m.clickedcart.pathfind(listmap, snaptogrid(m.tl)) m.clickedcart =", "[m.rect.left+27, m.rect.top+60]) window.blit(selected, [snaptogrid(m.tl)[0]*40-2, snaptogrid(m.tl)[1]*40-2]) window.blit(uibox, [555, 475]) pygame.display.flip() clock.tick(60)", "window = pygame.display.set_mode([800, 600]) ui.window = window screen = \"game\"", "m, listmap) if m.clickedcart != None: m.mode = \"action\" elif", "window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30) ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) if m.hoveritem.type.startswith(\"mine\") and", "= position self.tl = self.rect.topleft m = Mouse() def snaptogrid(pos):", "window.blit(selected2, [m.clickedcart.rect.left-2, m.clickedcart.rect.top-2]) if m.mode == \"action\": window.blit(box, [m.rect.left+10, m.rect.top+10])", "0: m.hoveritem = pygame.sprite.spritecollide(m, carts, False)[0] elif len(pygame.sprite.spritecollide(m, interactables, False))", "running: for event in pygame.event.get(): if event.type == pygame.QUIT: running", "m = Mouse() def snaptogrid(pos): return [int(math.floor(pos[0] / 40)), int(math.floor(pos[1]", "self.mode = \"select\" def pos(self, position): self.rect.topleft = position self.tl", "selected = pygame.image.load(\"./resources/images/selected.png\") selected2 = pygame.image.load(\"./resources/images/selected2.png\") box = pygame.image.load(\"./resources/images/box.png\") uibox", "= self.rect.topleft m = Mouse() def snaptogrid(pos): return [int(math.floor(pos[0] /", "pygame.MOUSEBUTTONDOWN: m.pos(pygame.mouse.get_pos()) if screen == \"game\": if pygame.sprite.spritecollide(m, carts, False)", "= None self.tl = self.rect.topleft self.mode = \"select\" def pos(self,", "gamedata = {\"level\": 0, \"coal\": 0, \"iron\": 1, \"copper\":0} tiles", "= {\"level\": 0, \"coal\": 0, \"iron\": 1, \"copper\":0} tiles =", "and listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: m.clickedcart.pathfind(listmap, snaptogrid(m.tl)) m.clickedcart = None m.mode", "m.hoveritem == None and not m.mode == \"action\": window.blit(box, [m.rect.left+10,", "m.clickedcart == None: window.blit(selected2, [m.clickedcart.rect.left-2, m.clickedcart.rect.top-2]) if m.mode == \"action\":", "= True gamedata = {\"level\": 0, \"coal\": 0, \"iron\": 1,", "[m.rect.left+27, m.rect.top+45]) ui.Text(\"Cart Here\", [m.rect.left+27, m.rect.top+60]) window.blit(selected, [snaptogrid(m.tl)[0]*40-2, snaptogrid(m.tl)[1]*40-2]) window.blit(uibox,", "== pygame.QUIT: running = False elif event.type == pygame.MOUSEBUTTONDOWN: m.pos(pygame.mouse.get_pos())", "\"action\" and m.clickedcart != None and listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: m.clickedcart.pathfind(listmap,", "if not m.clickedcart == None: window.blit(selected2, [m.clickedcart.rect.left-2, 
m.clickedcart.rect.top-2]) if m.mode", "= pygame.image.load(\"./resources/images/box.png\") uibox = pygame.image.load(\"./resources/images/ui box.png\") class Mouse(pygame.sprite.Sprite): def __init__(self):", "elif m.mode == \"action\" and m.clickedcart != None and listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]]", "not m.hoveritem == None and not m.mode == \"action\": window.blit(box,", "False} running = True gamedata = {\"level\": 0, \"coal\": 0,", "m.pos(pygame.mouse.get_pos()) if screen == \"game\": if pygame.sprite.spritecollide(m, carts, False) and", "None and not m.mode == \"action\": window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30)", "\"miner\")) if screen == \"game\": window.fill([100, 100, 100]) tiles.draw(window) carts.draw(window)", "= self.rect.topleft self.mode = \"select\" def pos(self, position): self.rect.topleft =", "m.rect.top+45]) ui.Text(\"Cart Here\", [m.rect.left+27, m.rect.top+60]) window.blit(selected, [snaptogrid(m.tl)[0]*40-2, snaptogrid(m.tl)[1]*40-2]) window.blit(uibox, [555,", "m.rect.top+25]) if listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: ui.Resize(22) ui.Text(\"Click to move\", [m.rect.left+27,", "pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type", "[m.rect.left+10, m.rect.top+10]) ui.Resize(30) ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) if m.hoveritem.type.startswith(\"mine\") and m.hoveritem", "event.key == pygame.K_SPACE: carts.add(map.Cart(snaptogrid(m.tl), \"miner\")) if screen == \"game\": window.fill([100,", "pygame.K_SPACE: carts.add(map.Cart(snaptogrid(m.tl), \"miner\")) if screen == \"game\": window.fill([100, 100, 100])", "None: window.blit(selected2, [m.clickedcart.rect.left-2, m.clickedcart.rect.top-2]) if m.mode == \"action\": window.blit(box, [m.rect.left+10,", "== \"game\": m.hoveritem = None if len(pygame.sprite.spritecollide(m, carts, False)) >", "m.clickedcart.rect.top-2]) if m.mode == \"action\": window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30) try:", "m.mode == \"select\": carts.update(\"select\", m, listmap) if m.clickedcart != None:", "0 loadlevel(0) while running: for event in pygame.event.get(): if event.type", "ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) if m.hoveritem.type.startswith(\"mine\") and m.hoveritem not in carts:", "gamedata[\"iron\"] = 1 gamedata[\"copper\"] = 0 loadlevel(0) while running: for", "\"select\" elif event.type == pygame.MOUSEMOTION: m.pos(pygame.mouse.get_pos()) if screen == \"game\":", "\"iron\": 1, \"copper\":0} tiles = pygame.sprite.Group() rails = pygame.sprite.Group() carts", "and m.hoveritem not in carts: ui.Resize(18) ui.Text(\"Carts Inside: \" +", "to move\", [m.rect.left+27, m.rect.top+45]) ui.Text(\"Cart Here\", [m.rect.left+27, m.rect.top+60]) window.blit(selected, [snaptogrid(m.tl)[0]*40-2,", "\"action\": window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30) ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) if m.hoveritem.type.startswith(\"mine\")", "position): self.rect.topleft = position self.tl = self.rect.topleft m = Mouse()", "gamedata[\"copper\"] = 0 loadlevel(0) while running: for event in pygame.event.get():", "= None m.mode = \"select\" elif event.type == pygame.MOUSEMOTION: m.pos(pygame.mouse.get_pos())", "self.rect.topleft m = Mouse() def snaptogrid(pos): return [int(math.floor(pos[0] / 40)),", "> 0: m.hoveritem = pygame.sprite.spritecollide(m, carts, False)[0] elif len(pygame.sprite.spritecollide(m, interactables,", "listmap = 
map.loadmap(int(number)) carts.empty() gamedata[\"level\"] = number gamedata[\"coal\"] = 0", "def loadlevel(number): global tiles, rails, carts, gamedata, listmap, interactables tiles,", "ui.Text(\"Max Carts: \" + str(m.hoveritem.data[\"max\"]), [m.rect.left+27, m.rect.top+60]) if not m.clickedcart", "= \"game\" s = {\"fullscreen\": False} running = True gamedata", "\"action\": window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30) try: ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) except:", "= pygame.sprite.spritecollide(m, carts, False)[0] elif len(pygame.sprite.spritecollide(m, interactables, False)) > 0:", "= \"select\" elif event.type == pygame.MOUSEMOTION: m.pos(pygame.mouse.get_pos()) if screen ==", "m.clickedcart.pathfind(listmap, snaptogrid(m.tl)) m.clickedcart = None m.mode = \"select\" elif event.type", "m.rect.top+10]) ui.Resize(30) ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) if m.hoveritem.type.startswith(\"mine\") and m.hoveritem not", "= pygame.sprite.Group() listmap = [] clock = pygame.time.Clock() selected =", "[m.rect.left+27, m.rect.top+25]) except: ui.Text(m.clickedcart.type.upper(), [m.rect.left+27, m.rect.top+25]) if listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0:", "event.type == pygame.KEYDOWN: if event.key == pygame.K_SPACE: carts.add(map.Cart(snaptogrid(m.tl), \"miner\")) if", "False)) > 0: m.hoveritem = pygame.sprite.spritecollide(m, interactables, False)[0] elif event.type", "+ str(m.hoveritem.data[\"max\"]), [m.rect.left+27, m.rect.top+60]) if not m.clickedcart == None: window.blit(selected2,", "pygame.image.load(\"./resources/images/ui box.png\") class Mouse(pygame.sprite.Sprite): def __init__(self): pygame.sprite.Sprite.__init__(self) self.image = pygame.surface.Surface([1,", "Mouse() def snaptogrid(pos): return [int(math.floor(pos[0] / 40)), int(math.floor(pos[1] / 40))]", "= number gamedata[\"coal\"] = 0 gamedata[\"iron\"] = 1 gamedata[\"copper\"] =", "box.png\") class Mouse(pygame.sprite.Sprite): def __init__(self): pygame.sprite.Sprite.__init__(self) self.image = pygame.surface.Surface([1, 1])", "clock = pygame.time.Clock() selected = pygame.image.load(\"./resources/images/selected.png\") selected2 = pygame.image.load(\"./resources/images/selected2.png\") box", "snaptogrid(pos): return [int(math.floor(pos[0] / 40)), int(math.floor(pos[1] / 40))] def loadlevel(number):", "= pygame.image.load(\"./resources/images/ui box.png\") class Mouse(pygame.sprite.Sprite): def __init__(self): pygame.sprite.Sprite.__init__(self) self.image =", "m.hoveritem.type.startswith(\"mine\") and m.hoveritem not in carts: ui.Resize(18) ui.Text(\"Carts Inside: \"", "rails, carts, gamedata, listmap, interactables tiles, rails, interactables, listmap =", "position self.tl = self.rect.topleft m = Mouse() def snaptogrid(pos): return", "carts, gamedata, listmap, interactables tiles, rails, interactables, listmap = map.loadmap(int(number))", "class Mouse(pygame.sprite.Sprite): def __init__(self): pygame.sprite.Sprite.__init__(self) self.image = pygame.surface.Surface([1, 1]) self.rect", "carts.add(map.Cart(snaptogrid(m.tl), \"miner\")) if screen == \"game\": window.fill([100, 100, 100]) tiles.draw(window)", "event.type == pygame.MOUSEMOTION: m.pos(pygame.mouse.get_pos()) if screen == \"game\": m.hoveritem =", "pygame.KEYDOWN: if event.key == pygame.K_SPACE: carts.add(map.Cart(snaptogrid(m.tl), \"miner\")) if screen ==", "m.hoveritem = pygame.sprite.spritecollide(m, carts, False)[0] elif len(pygame.sprite.spritecollide(m, 
interactables, False)) >", "carts, False)[0] elif len(pygame.sprite.spritecollide(m, interactables, False)) > 0: m.hoveritem =", "elif event.type == pygame.KEYDOWN: if event.key == pygame.K_SPACE: carts.add(map.Cart(snaptogrid(m.tl), \"miner\"))", "loadlevel(number): global tiles, rails, carts, gamedata, listmap, interactables tiles, rails,", "selected2 = pygame.image.load(\"./resources/images/selected2.png\") box = pygame.image.load(\"./resources/images/box.png\") uibox = pygame.image.load(\"./resources/images/ui box.png\")", "self.rect.topleft = [0, 0] self.clickedcart = None self.hoveritem = None", "100]) tiles.draw(window) carts.draw(window) carts.update(\"update\", m, listmap) if not m.hoveritem ==", "0: ui.Resize(22) ui.Text(\"Click to move\", [m.rect.left+27, m.rect.top+45]) ui.Text(\"Cart Here\", [m.rect.left+27,", "[m.clickedcart.rect.left-2, m.clickedcart.rect.top-2]) if m.mode == \"action\": window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30)", "rails = pygame.sprite.Group() carts = pygame.sprite.Group() interactables = pygame.sprite.Group() listmap", "[] clock = pygame.time.Clock() selected = pygame.image.load(\"./resources/images/selected.png\") selected2 = pygame.image.load(\"./resources/images/selected2.png\")", "\" + str(m.hoveritem.data[\"max\"]), [m.rect.left+27, m.rect.top+60]) if not m.clickedcart == None:", "pygame.QUIT: running = False elif event.type == pygame.MOUSEBUTTONDOWN: m.pos(pygame.mouse.get_pos()) if", "m.pos(pygame.mouse.get_pos()) if screen == \"game\": m.hoveritem = None if len(pygame.sprite.spritecollide(m,", "= pygame.sprite.spritecollide(m, interactables, False)[0] elif event.type == pygame.KEYDOWN: if event.key", "snaptogrid(m.tl)[1]*40-2]) window.blit(uibox, [555, 475]) pygame.display.flip() clock.tick(60) fps = clock.get_fps() pygame.quit()", "{\"fullscreen\": False} running = True gamedata = {\"level\": 0, \"coal\":", "move\", [m.rect.left+27, m.rect.top+45]) ui.Text(\"Cart Here\", [m.rect.left+27, m.rect.top+60]) window.blit(selected, [snaptogrid(m.tl)[0]*40-2, snaptogrid(m.tl)[1]*40-2])", "interactables, False)[0] elif event.type == pygame.KEYDOWN: if event.key == pygame.K_SPACE:", "event in pygame.event.get(): if event.type == pygame.QUIT: running = False", "self.image.get_rect() self.rect.topleft = [0, 0] self.clickedcart = None self.hoveritem =", "m.rect.top+10]) ui.Resize(30) try: ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) except: ui.Text(m.clickedcart.type.upper(), [m.rect.left+27, m.rect.top+25])", "and not m.mode == \"action\": window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30) ui.Text(m.hoveritem.type.upper(),", "ui.Text(\"Carts Inside: \" + str(m.hoveritem.data[\"carts\"]), [m.rect.left+27, m.rect.top+47]) ui.Text(\"Max Carts: \"", "m.mode = \"action\" elif m.mode == \"action\" and m.clickedcart !=", "screen == \"game\": if pygame.sprite.spritecollide(m, carts, False) and m.mode ==", "[m.rect.left+10, m.rect.top+10]) ui.Resize(30) try: ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) except: ui.Text(m.clickedcart.type.upper(), [m.rect.left+27,", "str(m.hoveritem.data[\"carts\"]), [m.rect.left+27, m.rect.top+47]) ui.Text(\"Max Carts: \" + str(m.hoveritem.data[\"max\"]), [m.rect.left+27, m.rect.top+60])", "uibox = pygame.image.load(\"./resources/images/ui box.png\") class Mouse(pygame.sprite.Sprite): def __init__(self): pygame.sprite.Sprite.__init__(self) self.image", "carts.update(\"select\", m, listmap) if m.clickedcart != None: m.mode = \"action\"", "\"game\": 
m.hoveritem = None if len(pygame.sprite.spritecollide(m, carts, False)) > 0:", "False elif event.type == pygame.MOUSEBUTTONDOWN: m.pos(pygame.mouse.get_pos()) if screen == \"game\":", "pygame.MOUSEMOTION: m.pos(pygame.mouse.get_pos()) if screen == \"game\": m.hoveritem = None if", "pygame.sprite.spritecollide(m, interactables, False)[0] elif event.type == pygame.KEYDOWN: if event.key ==", "if not m.hoveritem == None and not m.mode == \"action\":", "ui.Resize(18) ui.Text(\"Carts Inside: \" + str(m.hoveritem.data[\"carts\"]), [m.rect.left+27, m.rect.top+47]) ui.Text(\"Max Carts:", "listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: ui.Resize(22) ui.Text(\"Click to move\", [m.rect.left+27, m.rect.top+45]) ui.Text(\"Cart", "in pygame.event.get(): if event.type == pygame.QUIT: running = False elif", "pygame.sprite.Sprite.__init__(self) self.image = pygame.surface.Surface([1, 1]) self.rect = self.image.get_rect() self.rect.topleft =", "tiles, rails, interactables, listmap = map.loadmap(int(number)) carts.empty() gamedata[\"level\"] = number", "= {\"fullscreen\": False} running = True gamedata = {\"level\": 0,", "event.type == pygame.QUIT: running = False elif event.type == pygame.MOUSEBUTTONDOWN:", "= pygame.sprite.Group() interactables = pygame.sprite.Group() listmap = [] clock =", "= \"select\" def pos(self, position): self.rect.topleft = position self.tl =", "elif event.type == pygame.MOUSEMOTION: m.pos(pygame.mouse.get_pos()) if screen == \"game\": m.hoveritem", "== pygame.MOUSEBUTTONDOWN: m.pos(pygame.mouse.get_pos()) if screen == \"game\": if pygame.sprite.spritecollide(m, carts,", "carts.update(\"update\", m, listmap) if not m.hoveritem == None and not", "while running: for event in pygame.event.get(): if event.type == pygame.QUIT:", "== pygame.MOUSEMOTION: m.pos(pygame.mouse.get_pos()) if screen == \"game\": m.hoveritem = None", "math from game import map, ui window = pygame.display.set_mode([800, 600])", "= window screen = \"game\" s = {\"fullscreen\": False} running", "self.rect.topleft = position self.tl = self.rect.topleft m = Mouse() def", "interactables, listmap = map.loadmap(int(number)) carts.empty() gamedata[\"level\"] = number gamedata[\"coal\"] =", "[0, 0] self.clickedcart = None self.hoveritem = None self.tl =", "snaptogrid(m.tl)) m.clickedcart = None m.mode = \"select\" elif event.type ==", "box = pygame.image.load(\"./resources/images/box.png\") uibox = pygame.image.load(\"./resources/images/ui box.png\") class Mouse(pygame.sprite.Sprite): def", "False)[0] elif len(pygame.sprite.spritecollide(m, interactables, False)) > 0: m.hoveritem = pygame.sprite.spritecollide(m,", "pygame.time.Clock() selected = pygame.image.load(\"./resources/images/selected.png\") selected2 = pygame.image.load(\"./resources/images/selected2.png\") box = pygame.image.load(\"./resources/images/box.png\")", "pygame.sprite.Group() listmap = [] clock = pygame.time.Clock() selected = pygame.image.load(\"./resources/images/selected.png\")", "== \"game\": window.fill([100, 100, 100]) tiles.draw(window) carts.draw(window) carts.update(\"update\", m, listmap)", "window.blit(selected, [snaptogrid(m.tl)[0]*40-2, snaptogrid(m.tl)[1]*40-2]) window.blit(uibox, [555, 475]) pygame.display.flip() clock.tick(60) fps =", "= pygame.sprite.Group() carts = pygame.sprite.Group() interactables = pygame.sprite.Group() listmap =", "import map, ui window = pygame.display.set_mode([800, 600]) ui.window = window", "except: ui.Text(m.clickedcart.type.upper(), [m.rect.left+27, m.rect.top+25]) if 
listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: ui.Resize(22) ui.Text(\"Click", "carts, False)) > 0: m.hoveritem = pygame.sprite.spritecollide(m, carts, False)[0] elif", "self.image = pygame.surface.Surface([1, 1]) self.rect = self.image.get_rect() self.rect.topleft = [0,", "== pygame.K_SPACE: carts.add(map.Cart(snaptogrid(m.tl), \"miner\")) if screen == \"game\": window.fill([100, 100,", "0, \"coal\": 0, \"iron\": 1, \"copper\":0} tiles = pygame.sprite.Group() rails", "\"select\" def pos(self, position): self.rect.topleft = position self.tl = self.rect.topleft", "and m.mode == \"select\": carts.update(\"select\", m, listmap) if m.clickedcart !=", "= map.loadmap(int(number)) carts.empty() gamedata[\"level\"] = number gamedata[\"coal\"] = 0 gamedata[\"iron\"]", "\"copper\":0} tiles = pygame.sprite.Group() rails = pygame.sprite.Group() carts = pygame.sprite.Group()", "pygame.sprite.Group() rails = pygame.sprite.Group() carts = pygame.sprite.Group() interactables = pygame.sprite.Group()", "screen == \"game\": window.fill([100, 100, 100]) tiles.draw(window) carts.draw(window) carts.update(\"update\", m,", "not in carts: ui.Resize(18) ui.Text(\"Carts Inside: \" + str(m.hoveritem.data[\"carts\"]), [m.rect.left+27,", "pygame.sprite.Group() carts = pygame.sprite.Group() interactables = pygame.sprite.Group() listmap = []", "False)[0] elif event.type == pygame.KEYDOWN: if event.key == pygame.K_SPACE: carts.add(map.Cart(snaptogrid(m.tl),", "== \"select\": carts.update(\"select\", m, listmap) if m.clickedcart != None: m.mode", "False)) > 0: m.hoveritem = pygame.sprite.spritecollide(m, carts, False)[0] elif len(pygame.sprite.spritecollide(m,", "Mouse(pygame.sprite.Sprite): def __init__(self): pygame.sprite.Sprite.__init__(self) self.image = pygame.surface.Surface([1, 1]) self.rect =", "window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30) try: ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) except: ui.Text(m.clickedcart.type.upper(),", "gamedata, listmap, interactables tiles, rails, interactables, listmap = map.loadmap(int(number)) carts.empty()", "pygame.sprite.spritecollide(m, carts, False) and m.mode == \"select\": carts.update(\"select\", m, listmap)", "event.type == pygame.MOUSEBUTTONDOWN: m.pos(pygame.mouse.get_pos()) if screen == \"game\": if pygame.sprite.spritecollide(m,", "m.clickedcart != None and listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: m.clickedcart.pathfind(listmap, snaptogrid(m.tl)) m.clickedcart", "= \"action\" elif m.mode == \"action\" and m.clickedcart != None", "running = False elif event.type == pygame.MOUSEBUTTONDOWN: m.pos(pygame.mouse.get_pos()) if screen", "== pygame.KEYDOWN: if event.key == pygame.K_SPACE: carts.add(map.Cart(snaptogrid(m.tl), \"miner\")) if screen", "m.mode = \"select\" elif event.type == pygame.MOUSEMOTION: m.pos(pygame.mouse.get_pos()) if screen", "from game import map, ui window = pygame.display.set_mode([800, 600]) ui.window", "self.clickedcart = None self.hoveritem = None self.tl = self.rect.topleft self.mode", "ui.Resize(30) ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) if m.hoveritem.type.startswith(\"mine\") and m.hoveritem not in", "listmap, interactables tiles, rails, interactables, listmap = map.loadmap(int(number)) carts.empty() gamedata[\"level\"]", "running = True gamedata = {\"level\": 0, \"coal\": 0, \"iron\":", "if screen == \"game\": if pygame.sprite.spritecollide(m, carts, False) and m.mode", "interactables = pygame.sprite.Group() listmap = [] clock = pygame.time.Clock() 
selected", "int(math.floor(pos[1] / 40))] def loadlevel(number): global tiles, rails, carts, gamedata,", "False) and m.mode == \"select\": carts.update(\"select\", m, listmap) if m.clickedcart", "not m.clickedcart == None: window.blit(selected2, [m.clickedcart.rect.left-2, m.clickedcart.rect.top-2]) if m.mode ==", "Carts: \" + str(m.hoveritem.data[\"max\"]), [m.rect.left+27, m.rect.top+60]) if not m.clickedcart ==", "map, ui window = pygame.display.set_mode([800, 600]) ui.window = window screen", "if screen == \"game\": m.hoveritem = None if len(pygame.sprite.spritecollide(m, carts,", "= pygame.sprite.Group() rails = pygame.sprite.Group() carts = pygame.sprite.Group() interactables =", "/ 40)), int(math.floor(pos[1] / 40))] def loadlevel(number): global tiles, rails,", "m.hoveritem = pygame.sprite.spritecollide(m, interactables, False)[0] elif event.type == pygame.KEYDOWN: if", "carts = pygame.sprite.Group() interactables = pygame.sprite.Group() listmap = [] clock", "[int(math.floor(pos[0] / 40)), int(math.floor(pos[1] / 40))] def loadlevel(number): global tiles,", "pygame.sprite.Group() interactables = pygame.sprite.Group() listmap = [] clock = pygame.time.Clock()", "100, 100]) tiles.draw(window) carts.draw(window) carts.update(\"update\", m, listmap) if not m.hoveritem", "m.rect.top+60]) if not m.clickedcart == None: window.blit(selected2, [m.clickedcart.rect.left-2, m.clickedcart.rect.top-2]) if", "None if len(pygame.sprite.spritecollide(m, carts, False)) > 0: m.hoveritem = pygame.sprite.spritecollide(m,", "window.fill([100, 100, 100]) tiles.draw(window) carts.draw(window) carts.update(\"update\", m, listmap) if not", "= pygame.surface.Surface([1, 1]) self.rect = self.image.get_rect() self.rect.topleft = [0, 0]", "and m.clickedcart != None and listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: m.clickedcart.pathfind(listmap, snaptogrid(m.tl))", "m.rect.top+47]) ui.Text(\"Max Carts: \" + str(m.hoveritem.data[\"max\"]), [m.rect.left+27, m.rect.top+60]) if not", "m.mode == \"action\" and m.clickedcart != None and listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] >", "interactables, False)) > 0: m.hoveritem = pygame.sprite.spritecollide(m, interactables, False)[0] elif", "if screen == \"game\": window.fill([100, 100, 100]) tiles.draw(window) carts.draw(window) carts.update(\"update\",", "self.rect = self.image.get_rect() self.rect.topleft = [0, 0] self.clickedcart = None", "m.rect.top+60]) window.blit(selected, [snaptogrid(m.tl)[0]*40-2, snaptogrid(m.tl)[1]*40-2]) window.blit(uibox, [555, 475]) pygame.display.flip() clock.tick(60) fps", "= self.image.get_rect() self.rect.topleft = [0, 0] self.clickedcart = None self.hoveritem", "None m.mode = \"select\" elif event.type == pygame.MOUSEMOTION: m.pos(pygame.mouse.get_pos()) if", "__init__(self): pygame.sprite.Sprite.__init__(self) self.image = pygame.surface.Surface([1, 1]) self.rect = self.image.get_rect() self.rect.topleft", "in carts: ui.Resize(18) ui.Text(\"Carts Inside: \" + str(m.hoveritem.data[\"carts\"]), [m.rect.left+27, m.rect.top+47])", "{\"level\": 0, \"coal\": 0, \"iron\": 1, \"copper\":0} tiles = pygame.sprite.Group()", "carts.draw(window) carts.update(\"update\", m, listmap) if not m.hoveritem == None and", "pygame.image.load(\"./resources/images/selected2.png\") box = pygame.image.load(\"./resources/images/box.png\") uibox = pygame.image.load(\"./resources/images/ui box.png\") class Mouse(pygame.sprite.Sprite):", "m.mode == \"action\": window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30) try: 
ui.Text(m.hoveritem.type.upper(), [m.rect.left+27,", "pygame.display.set_mode([800, 600]) ui.window = window screen = \"game\" s =", "None self.hoveritem = None self.tl = self.rect.topleft self.mode = \"select\"", "def pos(self, position): self.rect.topleft = position self.tl = self.rect.topleft m", "self.hoveritem = None self.tl = self.rect.topleft self.mode = \"select\" def", "window screen = \"game\" s = {\"fullscreen\": False} running =", "[snaptogrid(m.tl)[0]*40-2, snaptogrid(m.tl)[1]*40-2]) window.blit(uibox, [555, 475]) pygame.display.flip() clock.tick(60) fps = clock.get_fps()", "rails, interactables, listmap = map.loadmap(int(number)) carts.empty() gamedata[\"level\"] = number gamedata[\"coal\"]", "0 gamedata[\"iron\"] = 1 gamedata[\"copper\"] = 0 loadlevel(0) while running:", "for event in pygame.event.get(): if event.type == pygame.QUIT: running =", "[m.rect.left+27, m.rect.top+25]) if listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: ui.Resize(22) ui.Text(\"Click to move\",", "Inside: \" + str(m.hoveritem.data[\"carts\"]), [m.rect.left+27, m.rect.top+47]) ui.Text(\"Max Carts: \" +", "0: m.hoveritem = pygame.sprite.spritecollide(m, interactables, False)[0] elif event.type == pygame.KEYDOWN:", "if m.hoveritem.type.startswith(\"mine\") and m.hoveritem not in carts: ui.Resize(18) ui.Text(\"Carts Inside:", "pygame.image.load(\"./resources/images/selected.png\") selected2 = pygame.image.load(\"./resources/images/selected2.png\") box = pygame.image.load(\"./resources/images/box.png\") uibox = pygame.image.load(\"./resources/images/ui", "screen == \"game\": m.hoveritem = None if len(pygame.sprite.spritecollide(m, carts, False))", "m.hoveritem not in carts: ui.Resize(18) ui.Text(\"Carts Inside: \" + str(m.hoveritem.data[\"carts\"]),", "try: ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) except: ui.Text(m.clickedcart.type.upper(), [m.rect.left+27, m.rect.top+25]) if listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]]", "= pygame.image.load(\"./resources/images/selected.png\") selected2 = pygame.image.load(\"./resources/images/selected2.png\") box = pygame.image.load(\"./resources/images/box.png\") uibox =", "def snaptogrid(pos): return [int(math.floor(pos[0] / 40)), int(math.floor(pos[1] / 40))] def", "if listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: ui.Resize(22) ui.Text(\"Click to move\", [m.rect.left+27, m.rect.top+45])", "!= None: m.mode = \"action\" elif m.mode == \"action\" and", "pygame, math from game import map, ui window = pygame.display.set_mode([800,", "elif len(pygame.sprite.spritecollide(m, interactables, False)) > 0: m.hoveritem = pygame.sprite.spritecollide(m, interactables,", "1]) self.rect = self.image.get_rect() self.rect.topleft = [0, 0] self.clickedcart =", "if len(pygame.sprite.spritecollide(m, carts, False)) > 0: m.hoveritem = pygame.sprite.spritecollide(m, carts,", "if m.clickedcart != None: m.mode = \"action\" elif m.mode ==", "> 0: ui.Resize(22) ui.Text(\"Click to move\", [m.rect.left+27, m.rect.top+45]) ui.Text(\"Cart Here\",", "self.tl = self.rect.topleft m = Mouse() def snaptogrid(pos): return [int(math.floor(pos[0]", "= pygame.time.Clock() selected = pygame.image.load(\"./resources/images/selected.png\") selected2 = pygame.image.load(\"./resources/images/selected2.png\") box =", "= False elif event.type == pygame.MOUSEBUTTONDOWN: m.pos(pygame.mouse.get_pos()) if screen ==", "0: m.clickedcart.pathfind(listmap, snaptogrid(m.tl)) m.clickedcart = None m.mode = \"select\" elif", "[m.rect.left+27, m.rect.top+60]) if not 
m.clickedcart == None: window.blit(selected2, [m.clickedcart.rect.left-2, m.clickedcart.rect.top-2])", "ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) except: ui.Text(m.clickedcart.type.upper(), [m.rect.left+27, m.rect.top+25]) if listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] >", "m.rect.top+25]) if m.hoveritem.type.startswith(\"mine\") and m.hoveritem not in carts: ui.Resize(18) ui.Text(\"Carts", "carts, False) and m.mode == \"select\": carts.update(\"select\", m, listmap) if", "pygame.image.load(\"./resources/images/box.png\") uibox = pygame.image.load(\"./resources/images/ui box.png\") class Mouse(pygame.sprite.Sprite): def __init__(self): pygame.sprite.Sprite.__init__(self)", "if event.type == pygame.QUIT: running = False elif event.type ==", "tiles = pygame.sprite.Group() rails = pygame.sprite.Group() carts = pygame.sprite.Group() interactables", "gamedata[\"coal\"] = 0 gamedata[\"iron\"] = 1 gamedata[\"copper\"] = 0 loadlevel(0)", "game import map, ui window = pygame.display.set_mode([800, 600]) ui.window =", "s = {\"fullscreen\": False} running = True gamedata = {\"level\":", "loadlevel(0) while running: for event in pygame.event.get(): if event.type ==", "1 gamedata[\"copper\"] = 0 loadlevel(0) while running: for event in", "\"coal\": 0, \"iron\": 1, \"copper\":0} tiles = pygame.sprite.Group() rails =", "ui.Text(m.clickedcart.type.upper(), [m.rect.left+27, m.rect.top+25]) if listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: ui.Resize(22) ui.Text(\"Click to", "\"select\": carts.update(\"select\", m, listmap) if m.clickedcart != None: m.mode =", "m.hoveritem = None if len(pygame.sprite.spritecollide(m, carts, False)) > 0: m.hoveritem", "number gamedata[\"coal\"] = 0 gamedata[\"iron\"] = 1 gamedata[\"copper\"] = 0", "None self.tl = self.rect.topleft self.mode = \"select\" def pos(self, position):", "== None: window.blit(selected2, [m.clickedcart.rect.left-2, m.clickedcart.rect.top-2]) if m.mode == \"action\": window.blit(box,", "40)), int(math.floor(pos[1] / 40))] def loadlevel(number): global tiles, rails, carts,", "pygame.sprite.spritecollide(m, carts, False)[0] elif len(pygame.sprite.spritecollide(m, interactables, False)) > 0: m.hoveritem" ]
[ "+ (yc1 - yc2)**2) print('distance', get_distance()) # *** somewhere else", "else in your program *** def get_length(xa=-50, ya=99, xb=.67, yb=.26):", "your program *** def get_length(xa=-50, ya=99, xb=.67, yb=.26): # calcualte", "print('distance', get_distance()) # *** somewhere else in your program ***", "yc2)**2) print('distance', get_distance()) # *** somewhere else in your program", "<NAME> # Example for Compose Methods: Extract Method. import math", "between the two circle return math.sqrt((xc1-xc2)**2 + (yc1 - yc2)**2)", "# calcualte the length of vector AB vector which is", "import math def get_distance(xc1=5, xc2=7.25, yc1=22, yc2=-4.84): # Calculate the", "two circle return math.sqrt((xc1-xc2)**2 + (yc1 - yc2)**2) print('distance', get_distance())", "calcualte the length of vector AB vector which is a", "xc2=7.25, yc1=22, yc2=-4.84): # Calculate the distance between the two", "which is a vector between A and B points. return", "is a vector between A and B points. return math.sqrt((xa-xb)*(xa-xb)", "math.sqrt((xc1-xc2)**2 + (yc1 - yc2)**2) print('distance', get_distance()) # *** somewhere", "vector AB vector which is a vector between A and", "return math.sqrt((xc1-xc2)**2 + (yc1 - yc2)**2) print('distance', get_distance()) # ***", "math def get_distance(xc1=5, xc2=7.25, yc1=22, yc2=-4.84): # Calculate the distance", "Compose Methods: Extract Method. import math def get_distance(xc1=5, xc2=7.25, yc1=22,", "Method. import math def get_distance(xc1=5, xc2=7.25, yc1=22, yc2=-4.84): # Calculate", "- yc2)**2) print('distance', get_distance()) # *** somewhere else in your", "length of vector AB vector which is a vector between", "program *** def get_length(xa=-50, ya=99, xb=.67, yb=.26): # calcualte the", "AB vector which is a vector between A and B", "# Written by <NAME> # Example for Compose Methods: Extract", "A and B points. return math.sqrt((xa-xb)*(xa-xb) + (ya-yb)*(ya-yb)) print('length', get_length())", "*** def get_length(xa=-50, ya=99, xb=.67, yb=.26): # calcualte the length", "circle return math.sqrt((xc1-xc2)**2 + (yc1 - yc2)**2) print('distance', get_distance()) #", "# Example for Compose Methods: Extract Method. import math def", "# Calculate the distance between the two circle return math.sqrt((xc1-xc2)**2", "of vector AB vector which is a vector between A", "vector between A and B points. return math.sqrt((xa-xb)*(xa-xb) + (ya-yb)*(ya-yb))", "yb=.26): # calcualte the length of vector AB vector which", "yc1=22, yc2=-4.84): # Calculate the distance between the two circle", "ya=99, xb=.67, yb=.26): # calcualte the length of vector AB", "(yc1 - yc2)**2) print('distance', get_distance()) # *** somewhere else in", "between A and B points. return math.sqrt((xa-xb)*(xa-xb) + (ya-yb)*(ya-yb)) print('length',", "in your program *** def get_length(xa=-50, ya=99, xb=.67, yb=.26): #", "# *** somewhere else in your program *** def get_length(xa=-50,", "Example for Compose Methods: Extract Method. import math def get_distance(xc1=5,", "def get_distance(xc1=5, xc2=7.25, yc1=22, yc2=-4.84): # Calculate the distance between", "get_distance()) # *** somewhere else in your program *** def", "yc2=-4.84): # Calculate the distance between the two circle return", "by <NAME> # Example for Compose Methods: Extract Method. 
import", "get_distance(xc1=5, xc2=7.25, yc1=22, yc2=-4.84): # Calculate the distance between the", "def get_length(xa=-50, ya=99, xb=.67, yb=.26): # calcualte the length of", "somewhere else in your program *** def get_length(xa=-50, ya=99, xb=.67,", "vector which is a vector between A and B points.", "the length of vector AB vector which is a vector", "Methods: Extract Method. import math def get_distance(xc1=5, xc2=7.25, yc1=22, yc2=-4.84):", "Calculate the distance between the two circle return math.sqrt((xc1-xc2)**2 +", "the distance between the two circle return math.sqrt((xc1-xc2)**2 + (yc1", "Written by <NAME> # Example for Compose Methods: Extract Method.", "Extract Method. import math def get_distance(xc1=5, xc2=7.25, yc1=22, yc2=-4.84): #", "the two circle return math.sqrt((xc1-xc2)**2 + (yc1 - yc2)**2) print('distance',", "*** somewhere else in your program *** def get_length(xa=-50, ya=99,", "for Compose Methods: Extract Method. import math def get_distance(xc1=5, xc2=7.25,", "xb=.67, yb=.26): # calcualte the length of vector AB vector", "distance between the two circle return math.sqrt((xc1-xc2)**2 + (yc1 -", "a vector between A and B points. return math.sqrt((xa-xb)*(xa-xb) +", "get_length(xa=-50, ya=99, xb=.67, yb=.26): # calcualte the length of vector", "<gh_stars>0 # Written by <NAME> # Example for Compose Methods:" ]
[ "bound is set. Eventually, it will be removed. # the", "# There are no constraints on d1. # Coefficients of", "(B, v, s), where B is a Matrix with coefficients", "these # problems in the integration variable). Au = Au.applyfunc(cancel)", "d if and only if (c1, ..., cm) is a", "computer algebra in general, and implicit # in the correctness", "of linear relations between # c1, ..., cm, e1, ...,", "is constant. Therefore, the term const is returned. const is", "around things like sqrt(x**2) != x # and also sqrt(x**2", "terminate no matter what n is. n = bound_degree(a, b,", "because they will both behave the same as monomials. For", "(or T). E_args are the arguments of the hyperexponentials indexed", "Pow, S from sympy.core.compatibility import reduce, range from sympy.integrals.rde import", "use more efficient residue reduction from ratint() if not fd.is_sqf", "A is None: return None n, e, u = A", "satisfy Dt == 1/x, because log(2) is constant. Therefore, the", "matrix A with m + r columns and entries in", "+ db)/b.LC() sitn = Poly(si*DE.t**N, DE.t) H[i] = H[i] +", "in Q]) if d > 0: M = Matrix(d, m,", "bug. return None roots = [(i, i.real_roots()) for i, _", "h in k[t], N is a non-negative integer, g in", "tuples of factions of the terms on the right hand", "sum(r for r in denom_real) bd_imag = sum(r for r", "in k[t] if and only if p = Sum(ek*gk) where", "1 & 2 for the limited integration problem. Given a", "much longer with large n's. n = 5 h, B", "return the empty matrix. qs, _ = list(zip(*Q)) return (qs,", "1, m)), then q = Sum(dj*hj, (j, 1, r)), where", "is exactly the transcendence degree of K over C(x). Furthermore,", "r = [(Mqq*vj)[0] for vj in V] # [r1, ...,", "= fi[j]*DE.t**i hi[j] = hji # building up Sum(djn*(D(fjn*t^n) -", "= Ai.col_join(M.row_join(zeros(M.rows, ri))) Fi, hi = [None]*ri, [None]*ri # from", "This function uses the structure theorem approach, which says that", "f is the logarithmic derivative of a k(t)-radical return None", "sympy.solvers import solve def prde_normal_denom(fa, fd, G, DE): \"\"\" Parametric", "in Q]): N = max([ri.degree(DE.t) for _, ri in Q])", "ri in Q]): N = max([ri.degree(DE.t) for _, ri in", "if not b: # I will have to verify, but", "book, page 255), so most likely this indicates a bug.", "min(order_at_oo(fa, fd, DE.t), min([order_at_oo(ga, gd, DE.t) for ga, gd in", "c1, ..., cm in Const(K) such that Dy + f*y", "Special Polynomial Differential Equation algorithm: Parametric Version. Given a derivation", "constraints. N = max([qi.degree(DE.t) for qi in q]) M =", "i in range(r)]) y_num, y_den = y.as_numer_denom() Ya, Yd =", "of a k(t) radical if there exist n in ZZ", "N, (a*hn*fa).cancel(fd, include=True), V) def limited_integrate(fa, fd, G, DE): \"\"\"", "d): \"\"\" Given p = [p1, ..., pm] in k[t]^m", "of factions of the terms on the right hand side", "A.col_join(B).col_join(C) def prde_cancel_liouvillian(b, Q, n, DE): \"\"\" Pg, 237. \"\"\"", "and write tests p = cancel(fa.as_expr()/fd.as_expr() - residue_reduce_derivation(H, DE, z))", "(j = 1, ..., u). # Sum(ci*gi) is in k[t]", "Df is the logarithmic derivative of a k(t)-radical. b in", ":-1], Au[:, -1] for j in range(A.cols): for i in", "..., m) are then y = Sum(ek*hk, (k, 1, v))/gamma.", "the log-part of the integral # of f respolys, residues", "Ya*Poly(1/Yd.LC(), DE.t), Yd.monic() return Y, C def parametric_log_deriv_heu(fa, fd, wa,", "the primitive, hyperexponential, and hypertangent cases, respectively. 
If case is", "z=z) if not b: # I will have to verify,", "Risch Differential Equation - No cancellation: deg(b) small enough. Given", "TODO: finish writing this and write tests p = cancel(fa.as_expr()/fd.as_expr()", "rational function evaluated at sqrt(-1) without actually evaluating it at", "if all(qi.is_zero for qi in Q): dc = -1 M", "Sum(dj*hj, (j, 1, r)) where d1, ..., dr are in", "polynomial solution # only if the sum is in k[t].", "E_args are the arguments of the hyperexponentials indexed by E_K", "if not all([ri.is_zero for _, ri in Q]): N =", "q, M = prde_linear_constraints(a, b, g, DE) # q =", "1, m)) \"\"\" R, Z = list(zip(*[gcdex_diophantine(b, a, qi) for", "or None, which means that Df/f is not the derivative", "m)). For case == 'primitive', k<t> == k[t], so it", "a, b, q1, ..., qm in k[t] with deg(a) >", "is # B*Matrix([c1, ..., cm, d1]) == 0 # There", "in which case it has proven that no solution exists,", "above are also nonlinear or Liouvillian, but if this #", "is Sum(ci*qi).quo(d), and the remainder is zero # for c1,", "numerator ba[1] is the imaginary part and bd is the", "vector from V, we take V[0] c0 = V[0][0] #", "i / i --- = --. --- --- t f", "# y = d1*f1 for f1 = 1 and any", "in k[t] if and only is ci = Sum(dj*aji) #", "max(0, DE.d.degree() - 1)): return prde_no_cancel_b_large(b, q, n, DE) elif", "solutions of Bx == v, or v has a non-constant", "r in denom_imag) num_real = [value if key[0] % 4", "and primitive cases, respectively. For the hyperexponential (resp. hypertangent) case,", "G, DE): \"\"\" Parametric Risch Differential Equation - Generate linear", "See also ======== is_log_deriv_k_t_radical_in_field, is_log_deriv_k_t_radical \"\"\" # Compute Df/f dfa,", "0 for key, value in ba.items()] num_imag = [value if", "m columns and entries in Const(k). # Sum(ci*qqi) is Sum(ci*qi).quo(d),", "and any d1 in Const(k) = k. f = [Poly(1,", "B.quo(g), [gia.cancel(gid*g, include=True) for gia, gid in G] # a*Dp", "hn = c.gcd(c.diff(DE.t)) a = hn b = -derivation(hn, DE)", "d in k[t], return q = [q1, ..., qm] in", "step 1 & 2 for the limited integration problem. Given", "in determining if an element a of K is a", "and entries in Const(k) such that # a*Dp + b*p", "- c1*r2.nth(i) for i in range(z.degree(DE.t))] s = solve(eqs, c1)", "DE.indices('log')]) ans = list(zip(terms, u)) result = Mul(*[Pow(i, j) for", "solutions # y = p/gamma of the initial equation with", "entries in k(t), and because Matrix doesn't play well with", "and c1, ..., cm in Const(K) such that Dy +", "case. This should never happen for the # functions given", "u such that n*f == Du/u. exp(f) will be the", "== 3 else 0 for key, value in bd.items()] bd_real", "the space and form a basis except possibly when Dy", "used both in solving parametric problems and in determining if", "= 1, ..., m), when # A*Matrix([c1, ..., cm]) ==", "[]) # No constraints, return the empty matrix. qs, _", "derivation(sitn, DE) - b*sitn if all(qi.is_zero for qi in Q):", "== 0, # in which case the sum is equal", "u, n, const) or None, which means that Df cannot", "(a, d) with a and d in k[t]. \"\"\" m", "of L_K/C(x) U E_K/C(x) is exactly the transcendence degree of", "i --- = Df. 
--- --- t i in L", "q]) M = Matrix(N + 1, m, lambda i, j:", "this # changes, then this will need to be updated", "derivation D on k[t] and a in k[t], b in", "(DE.case == 'base' or b.degree() > max(0, DE.d.degree() - 1)):", "Poly(DE.t**2 + 1, DE.t) elif case in ['primitive', 'base']: B", "m), when # A*Matrix([c1, ..., cm]) == 0 and #", "exp(x) and exp(x + 1) == E*exp(x) satisfy Dt ==", "in some (possibly unspecified extension) and \"in_field\" with the function", "= k + m + u. v = len(h) M", "is returned. const is such that log(const) + f ==", "= lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC()) l = fd.monic().lcm(wd.monic())*Poly(c, DE.t) ln, ls =", "M*Matrix([c1, ..., cm]) == 0, # in which case the", "k[t] with c1, ..., cm in Const(k) if and only", "g = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for gia, gid in", "1, m)). \"\"\" m = len(p) q, r = zip(*[pi.div(d)", "function evaluated at sqrt(-1) without actually evaluating it at sqrt(-1)", "where ba[0] is real part of the numerator ba[1] is", "ZZ, and b, q1, ..., qm in k[t] with deg(b)", "= ([DE.T[i] for i in DE.indices('exp')] + [DE.extargs[i] for i", "case == 'tan': raise NotImplementedError(\"The hypertangent case is \" \"not", "in that it finds the solution in the given field", "that E_K/C(x) and L_K/C(x) are disjoint. The sets L_K/C(x) and", "= Matrix(N + 1, m, lambda i, j: q[j].nth(i)) A,", "case, solutions of # a*Dp + b*p = Sum(ci*qi) =", "..., cm in Const(K) such that Dy + f*y ==", "are the arguments of the hyperexponentials indexed by E_K (i.e.,", "Dy + f*y == Sum(ci*gi, (i, 1, m)), and to", "gcd(a, t) == 1 (resp. gcd(a, t**2 + 1) ==", "= fd.monic().lcm(wd.monic())*Poly(c, DE.t) ln, ls = splitfactor(l, DE) z =", "result, const) def is_log_deriv_k_t_radical(fa, fd, DE, Df=True): r\"\"\" Checks if", "and only if (c1, ..., cm) is a solution of", "roots): # If f is the logarithmic derivative of a", "case != 'base'. alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t) etaa, etad", "+ v columns. A = -eye(m) for vj in V:", "(ds,) + Es) # lcm(ds, es1, ..., esm) a =", "indices of logarithmic monomials of K over C(x)), and E_K/C(x)", "= A # TODO: Add test if Q == 1:", "return (qs, M) def poly_linear_constraints(p, d): \"\"\" Given p =", "automatically. See also ======== is_log_deriv_k_t_radical, is_deriv_k \"\"\" fa, fd =", "derivative of a k(t)-radical. b in k(t) can be written", "are # y = Sum(dj*fj, (j, 1, r) + Sum(ei*hi,", "q, r = zip(*[pi.div(d) for pi in p]) if not", "wt] where each wl is a column matrix with #", "# Solutions of (a/d)*Dp + (b/d)*p = Sum(dj*rj) correspond to", "q1, ..., qm in k[t] with deg(a) > 0 and", "k[t], b in k<t>, G = [g1, ..., gm] in", "of the logarithmic terms in L_args. To handle the case", "n < 0: # Only the trivial zero solution is", "j in ans]) argterms = ([DE.T[i] for i in DE.indices('exp')]", "= list(zip(*G)) gd = reduce(lambda i, j: i.lcm(j), Gds, Poly(1,", "sum(r for r in num_real) ba_imag = sum(r for r", "c1, ..., cm in Const(k) and q in k<t> of", "a*Dq + b*q = Sum(ci*Gi) correspond # to solutions z", "of that constant. 
argterms = ([DE.extargs[i] for i in DE.indices('exp')]", "Ya, Yd = Poly(y_num, DE.t), Poly(y_den, DE.t) Y = Ya*Poly(1/Yd.LC(),", "be the same as if it were [[1, 1]]) residueterms", "more general prde_special_denom() automatically if it cannot determine that S1irr", "Poly(y_num, DE.t), Poly(y_den, DE.t) Y = Ya*Poly(1/Yd.LC(), DE.t), Yd.monic() return", "D == d/dt or deg(b) > max(0, deg(D) - 1),", "I believe that the answer should be # None in", "..., dr) is a solution of Ax == 0. \"\"\"", "tuple (A, B, GG, h) such that A, B, h", "DE): \"\"\" Pg, 237. \"\"\" H = [] # Why", "Base case. Dy == 0 for all y in k", "1, m, lambda i, j: q[j].nth(i)) A, _ = constant_system(M,", "qi in Q] + [S(0)]]) # The condition for solvability", "fa*Poly(1/fd.LC(), DE.t), fd.monic() # interpretting limited integration problem as a", "better way?) f = [Poly(fa.as_expr()/fd.as_expr(), t, field=True) for fa, fd", "with m columns and entries in Const(k). # Sum(ci*gi) is", "+ 1) in k, sqrt(-1) not in k), a !=", "in k[t] satisfies A*Dr + B*r == Sum(ci*ggi, (i, 1,", "not V: return None else: # we can take any", "q, n, DE) beta = [betai + alpha*ri for betai,", "..., gm] in k(t)^m, return Q = [q1, ..., qm]", "ba.as_poly(gen).as_dict() denom_real = [value if key[0] % 4 == 0", "with DecrementLevel(DE): Qy = [frac_in(q.nth(i), DE.t, field=True) for q in", "nc - min(0, nb)) if not nb: # Possible cancellation.", "solution. return [], eye(m) # Could return A, but this", "an element of k(t). ans is a list of tuples", "n, e, u = A u *= DE.t**e elif case", "Rm+1; m = A.rows Rm1 = Ri.applyfunc(lambda x: derivation(x, DE,", "currently added test case takes large time # even with", "with ci = Sum(dj*aji). try: # We try n=5. At", "m, lambda i, j: Q[j][1].nth(i)) else: M = Matrix(0, m,", "- n), p**-n) return (A, B, G, h) def prde_linear_constraints(a,", "Sum(aji*qqi). if not V: # No non-trivial solution. return [],", "a matrix M with entries in k(t) such that for", "arguments of the exponential terms in E_args. To handle the", "not have problems, # because case != 'base'. alphaa, alphad", "# (l*f).div(z) u2, r2 = (wa*l.quo(wd)).div(z) # (l*w).div(z) eqs =", "such that n*b == Du/u. Either returns (ans, u, n,", "polynomial i, d = i.as_numer_denom() icoeff, iterms = sqf_list(i) l.append(Mul(*([Pow(icoeff,", "when solving the parametric logarithmic # derivative problem when integration", "ga, gd in G] if not all([ri.is_zero for _, ri", "A[s, j] A.row_op(s, lambda r, jj: cancel(r - Asj*Rm1[jj])) #", "# Sum(ci*qi) is divisible by d if and only if", "and only if # f = fa/fd, fd is square-free,", "the transcendence degree of K over C(x). Furthermore, because Const_D(K)", "wl[-v:] for wl in W]) # excise dj's. N =", "== 0. # Transform fractions (fa, fd) in f into", "c1, ..., cm in Const(k) and q in k[t] satisfy", "satisfies a*Dq + b*q == Sum(ci*Gi, (i, 1, m)). \"\"\"", "in k(t)^m, return Q = [q1, ..., qm] in k[t]^m", "fi] ri = len(fi) if i == n: M =", "elif case == 'base': # TODO: we can use more", "prde_no_cancel_b_small(b, q, n, DE) elif (DE.d.degree() >= 2 and b.degree()", "and gcd(a, b) == 1, return (A, B, Q, R,", "Gd in G]) n = min(0, nc - min(0, nb))", "= Sum(dj*aji) # (i = 1, ..., m) for some", "d1, ..., dr ar in Const(k) and # B*Matrix([c1, ...,", "C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices", "1)*x**2), x). # But this is a limitation in computer", "'other_linear'). 
N = hn.degree(DE.t) + hs.degree(DE.t) + max(0, 1 -", "Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1,", "in ['other_linear', 'other_nonlinear']: # XXX: If these are supported by", "- b*fjnt^n)) Fi[j] = -(derivation(hji, DE) - b*hji) H +=", "V] # [f1, ..., fu] # # Solve the reduced", "of parametric_log_deriv is implemented. return None u1, r1 = (fa*l.quo(fd)).div(z)", "'tan': p = Poly(DE.t**2 + 1, DE.t) elif case in", "those for solving Risch Differential Equations. See the outline in", "Solve a Parametric Risch Differential Equation: Dy + f*y ==", "of a k(t)-radical, then all the # roots of the", "= 1, ..., u). # Sum(ci*gi) is in k[t] if", "etad, DE) B = parametric_log_deriv(betaa, betad, etaa, etad, DE) if", "g1, ..., gm in k(t) with f weakly normalized with", "only if (c1, ..., cm) is a solution of Mx", "(gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative) from sympy.matrices", "Fi[j] = -(derivation(hji, DE) - b*hji) H += hi #", "1. a = a.LC() b, q = b.quo_ground(a), [qi.quo_ground(a) for", "Q[j][1].nth(i)) else: M = Matrix(0, m, []) # No constraints,", "Sum(dj*rj) # where rj = Sum(aji*qqi). if not V: #", "not in some (possibly unspecified extension) and \"in_field\" with the", "num_imag) ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen), (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen)) bd", "in Const(k). # Sum(ci*gi) is in k[t] for c1, ...,", "as if it were [[1, 1]]) residueterms = [(H[j][1].subs(z, i),", "it is necessary to pass the arguments of the exponential", "= [g1, ..., gm] in k(t)^m, return Q = [q1,", "alphad = real_imag(ba, bd*a, DE.t) betad = alphad etaa, etad", "Because M has entries in k(t), and because Matrix doesn't", "Generate a system for the constant solutions. Given a differential", "Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj) # where rj = Sum(aji*qi)", "if key[0] % 4 == 2 else 0 for key,", "this might be empty, but everything below should work find", "is equal to Sum(fi*qi). M, _ = constant_system(M, zeros(M.rows, 1),", "in the constant field that is identically zero, but cannot", "= sqf_list(d) ld.append(Mul(*([Pow(dcoeff, j)] + [Pow(b, e*j) for b, e", "Given p = [p1, ..., pm] in k[t]^m and d", "entries in Const(k) such that # a*Dp + b*p =", "r, n, DE) # h = [h1, ..., hv] in", "k(t), and a hyperexponential monomial theta over k(t), raises either", "L i in E i K/C(x) K/C(x) Where C =", "field (actually, this same correctness problem exists in any #", "u *= n terms = ([DE.T[i] for i in DE.indices('exp')]", "# Note: See comment in constant_system # Also note: derivation(basic=True)", "zeros(0, 2) else: dc = max([qi.degree(DE.t) for qi in Q])", "key[0] % 4 == 2 else 0 for key, value", "m)) has # a solution y0 in k with c1,", "zeros(1, m) # No constraints. N = max([qi.degree(DE.t) for qi", "DE.d.degree() >= 2)): return prde_no_cancel_b_small(b, q, n, DE) elif (DE.d.degree()", "# Normalization: a = 1. 
a = a.LC() b, q", "Au = A.row_join(u) Au = Au.rref(simplify=cancel, normalize_last=False)[0] # Warning: This", "is the logarithmic derivative of a k(t)-radical return None if", "m)) has a solution p of degree <= n in", "DE.t), Yd.monic() return Y, C def parametric_log_deriv_heu(fa, fd, wa, wd,", "['primitive', 'base']: B = ba.quo(bd) return (a, B, G, Poly(1,", "or v.is_zero: return None return (Q*N, Q*M, v) if p.degree(DE.t)", "constant field that is identically zero, but cannot # be", "a solution p of degree <= n in k[t] with", "Q]) M = Matrix(dc + 1, m, lambda i, j:", "# if and only y0 = Sum(dj*fj, (j, 1, r))", "f - Dg will be in k[t] if f is", "for pi in p]) if not all([ri.is_zero for ri in", "k<t>, G = [g1, ..., gm] in k(t)^m, and for", "bd, G, DE) # Solutions p in k[t] of A*Dp", "problem exists in any # algorithm that uses rref()). #", "also ======== is_log_deriv_k_t_radical_in_field, is_log_deriv_k_t_radical \"\"\" # Compute Df/f dfa, dfd", "(k, 1, v)). # Collect solution components. h = f", "most difficult # cases. return A def is_deriv_k(fa, fd, DE):", "extension over C(x), then the cardinality of L_K/C(x) U E_K/C(x)", "..., qm] in k[t]^m and a matrix M with entries", "as # risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x). # But", "deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in", "as those of # (a/d)*Dp + (b/d)*p = Sum(dj*rj) #", "= Const(K), L_K/C(x) = { i in {1, ..., n}", "hr] in k(t)^r and a matrix A with m +", "wd, DE, c1=None): \"\"\" Parametric logarithmic derivative heuristic. Given a", "A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0. \"\"\" m", "arguments of one exponential from the other. Therefore, it is", "version of step 1 & 2 for the limited integration", "- mu) else: # TODO: implement this raise NotImplementedError V", "that S1irr == Sirr. Furthermore, it will automatically call bound_degree()", "qi in Q] f, B = param_rischDE(ba, bd, Q0, DE)", "= Sum(dj*Sum(aji*qi)) # are the same as those of #", "return (A, B, G, h) def prde_linear_constraints(a, b, G, DE):", "Const(k) = Const(k0) # such that Dy0 + b*y0 =", "is useful for seeing exactly what elements of k(t) produce", "up Sum(djn*(D(fjn*t^n) - b*fjnt^n)) Fi[j] = -(derivation(hji, DE) - b*hji)", "parametric_log_deriv(alphaa, alphad, etaa, etad, DE) if A is not None:", "(fa*l.quo(fd)).div(z) # (l*f).div(z) u2, r2 = (wa*l.quo(wd)).div(z) # (l*w).div(z) eqs", "range(A.rows): if A[i, j].has(*DE.T): # This assumes that const(F(t0, ...,", "with exact quotient Sum(aji*qqi). # Sum(ci*qi) is divisible by d", "range(r)]) y_num, y_den = y.as_numer_denom() Ya, Yd = Poly(y_num, DE.t),", "n=5. At least for prde_spde, it will always # terminate", "one of {'primitive', 'exp', 'tan', \" \"'base', 'auto'}, not %s\"", "(i, 1, m)))/a has degree at most n1 and satisfies", "have not yet been implemented\") # else: deg(a) > 0", "--- t i in L i in E i K/C(x)", "Note: See comment in constant_system # Also note: derivation(basic=True) calls", "k. f = [Poly(1, t, field=True)] # r = 1", "are tuples (a, d) with a and d in k[t].", "# No constraints. return q, M def constant_system(A, u, DE):", "in k[t]. \"\"\" m = len(G) q, (fa, fd) =", "constraints. return q, M def constant_system(A, u, DE): \"\"\" Generate", "key, value in bd.items()] bd_real = sum(r for r in", "We try n=5. 
At least for prde_spde, it will always", "a derivation D in k[t], a, b in k[t] relatively", "n and Dq + b*q == Sum(ci*qi, (i, 1, m))", "a matrix with # m + r columns and entries", "cancel=True) wa, wd = frac_in((wa, wd), DE.t) A = parametric_log_deriv(pa,", "= A elif case == 'base': # TODO: we can", "able to completely decide these # problems in the integration", "DE): \"\"\" Parametric Poly Risch Differential Equation - No cancellation:", "DE.indices('log')]) const = cancel(fa.as_expr()/fd.as_expr() - Add(*[Mul(i, j/n) for i, j", "in range(A.rows): if A[i, j].has(*DE.T): # This assumes that const(F(t0,", "B = B.row_join(zeros(B.rows, m)) C = I.row_join(zeros(m, r)).row_join(-I) return f", "# No constraints. N = max([qi.degree(DE.t) for qi in q])", "= B.row_join(zeros(B.rows, m)) C = I.row_join(zeros(m, r)).row_join(-I) return f +", "on k[t], a, b, in k[t] with gcd(a, b) ==", "answer u such that n*f == Du/u. exp(f) will be", "divisible by d if and only if M*Matrix([f1, ..., fm])", "case s is True and the solutions in C of", "a matrix M with entries in k such that Sum(ci*pi,", "for betai, ri in zip(beta, r)] alpha *= a #", "[None]*ri, [None]*ri # from eq. on top of p.238 (unnumbered)", "return None # Note: if residueterms = [], returns (1,", "no solution exists, or returns a solution (n, m, v)", "# a*Dp + b*p = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) # are", "the set of all indices of logarithmic monomials of K", "'exp', 'tan']: hs = reduce(lambda i, j: i.lcm(j), (ds,) +", "cm in k, is divisible by d if and only", "of p.238 (unnumbered) for j in range(ri): hji = fi[j]*DE.t**i", "crop up if the integral explicitly contains an # expression", "import solve def prde_normal_denom(fa, fd, G, DE): \"\"\" Parametric Risch", "Q, n, DE): \"\"\" Parametric Poly Risch Differential Equation -", "-1, -1): # [n, ..., 0] for i in range(m):", "f, g1, ..., gn in k(t), return (a, b, h,", "a matrix with m columns and entries in Const(k). #", "len(G) Gns, Gds = list(zip(*G)) d = reduce(lambda i, j:", "= a*pN B = ba*pN.quo(bd) + Poly(n, DE.t)*a*derivation(p, DE).quo(p)*pN G", "solution components. h = f + [alpha*gk for gk in", "= splitfactor(l, DE) z = ls*ln.gcd(ln.diff(DE.t)) if not z.has(DE.t): #", "one exponential from the other. Therefore, it is necessary to", "1, m)). Furthermore, if S1irr == Sirr, then p is", "if A is None: return None n, e, u =", "+ i*derivation(DE.t, DE)/DE.t, DE.t, field=True) with DecrementLevel(DE): Qy = [frac_in(q.nth(i),", "f*y == 0 # is solvable in k(t}. The corresponding", "D on k[t], a, b, in k[t] with gcd(a, b)", "DE) - fa*derivation(fd, DE)).cancel(fd**2, include=True) else: dfa, dfd = fa,", "const is such that log(const) + f == u. This", "pn # (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N - n), ...,", "where qqi = qi.quo(d). 
# M is a matrix with", "for qi in Q): dc = -1 M = Matrix()", "residueterms]) return (n, u) elif case == 'tan': raise NotImplementedError(\"The", "Sum(ci*wi, (i, 1, m)), p = v*h is in k<t>,", "over 'k' (not k[t]) if DE.case == 'primitive': with DecrementLevel(DE):", "- A[s, j]*u[m+1 u.row_op(s, lambda r, jj: cancel(r - Asj*um1))", "Sum(ci*Gi) correspond # to solutions q = p/hs of the", "and \"in_field\" with the function name is used to indicate", "j: i.lcm(j), Gds) d = Poly(d, field=True) Q = [(ga*(d).quo(gd)).div(d)", "= Da_i, for some a_i in C(x)(t_1, ..., t_i-1) }", "% 4 == 0 else -value if key[0] % 4", "q.degree(DE.t)) if q.degree(DE.t) > B: eqs = [p.nth(i) - c1*q.nth(i)", "DE.t) A = parametric_log_deriv(pa, pd, wa, wd, DE) if A", "not None: Q, m, z = A if Q ==", "fa, fd = fa.cancel(fd, include=True) # f must be simple", "limited integration problem as a # parametric Risch DE problem", "C(x)), and E_K/C(x) = { i in {1, ..., n}", "# where rj = Sum(aji*qi) (j = 1, ..., u)", "(bd_real*bd_real + bd_imag*bd_imag).as_poly(gen) return (ba[0], ba[1], bd) def prde_special_denom(a, ba,", "list(zip(*E)) c = reduce(lambda i, j: i.lcm(j), (dn,) + En)", "constraints on d1. # Coefficients of t^j (j > 0)", "So far, all the above are also nonlinear or Liouvillian,", "+ (b/d)*p = Sum(dj*rj) correspond to # solutions alpha*p +", "Q = [q1, ..., qm] in k[t]^m and a matrix", "u = constant_system(M, zeros(dc + 1, 1), DE) c =", "# correspond to solutions y = z/q of the original", "cancel() return None else: if not all(i.is_Rational for i in", "in [j for _, j in residueterms]] + [n], S(1))", "DE)/DE.t, DE.t, field=True) with DecrementLevel(DE): Qy = [frac_in(q.nth(i), DE.t, field=True)", "# V = [v1, ..., vu] where each vj is", "sympy.core import Dummy, ilcm, Add, Mul, Pow, S from sympy.core.compatibility", "in u) or not A: # If the elements of", "Du/u. exp(f) will be the same as u up to", "a != 0, and gcd(a, t) == 1 (resp. gcd(a,", "there exist n in ZZ and u in k(t) with", "1) == E*exp(x) satisfy Dt == t. Therefore, the term", "for some a_i in C(x)(t_1, ..., t_i-1)* } (i.e., the", "1) != x + 1 # Issue 10798: i need", "DE.t)]*m for N in range(n, 0, -1): # [n, ...,", "try: # We try n=5. At least for prde_spde, it", "such that Df/f == Du. log(f) will be the same", "next loop instead of Q it has # to be", "a = Db. Either returns (ans, u), such that Df/f", "for i in u): # TODO: But maybe we can", "in k(t), and G = [G1, ..., Gm] in k(t)^m,", "(ans, u), such that Df/f == Du, or None, which", "vj is a column matrix with # entries aj1, ...,", "part of the numerator ba[1] is the imaginary part and", "..., cm, d1, ..., dr]]).T == 0. \"\"\" m =", "g, DE) # q = [q1, ..., qm] where qi", "derivative of a k(t)-radical. case is one of {'primitive', 'exp',", "return None n, u = A elif case == 'base':", "# TODO: we can use more efficient residue reduction from", "non-constant coefficient, in which case s is False Ax ==", "= [frac_in(qi.TC(), t0, field=True) for qi in Q] f, B", "DE.d.degree() - 1 and n > -b.as_poly().LC()/DE.d.as_poly().LC()): raise NotImplementedError(\"prde_no_cancel_b_equal() is", "Sum(dj*aji). try: # We try n=5. 
At least for prde_spde,", "derivation(DE.t, DE).degree(DE.t) - 1) C = max(p.degree(DE.t), q.degree(DE.t)) if q.degree(DE.t)", "To handle the case where we are given Df, not", "with the very similar special_denom() in rde.py if case ==", "pN = p**N pn = p**-n # This is 1/h", "V[0]/(-c0) r = len(h) m = len(v) - r -", "if DE.case != 'base': with DecrementLevel(DE): t0 = DE.t #", "case == 'primitive': with DecrementLevel(DE): pa, pd = frac_in(p, DE.t)", "It isn't too worrisome, because the heuristic handles most difficult", "says that for any f in K, Df is the", "See also ======== is_log_deriv_k_t_radical_in_field, is_deriv_k \"\"\" H = [] if", "# Could return A, but this has # the minimum", "Const(k0) # such that Dy0 + b*y0 = Sum(ci*qi, (i,", "hi = [None]*ri, [None]*ri # from eq. on top of", "in range(A.cols): for i in range(A.rows): if A[i, j].has(*DE.T): #", "of a*Dp + b*p = Sum(ci*qi) correspond to # solutions", "raises either NotImplementedError, in which case the heuristic failed, or", "and either D == d/dt or deg(b) > max(0, deg(D)", "by d if and only if M*Matrix([f1, ..., fm]) ==", "(ba_imag*bd_real - ba_real*bd_imag).as_poly(gen)) bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen) return (ba[0],", "fa/fd, fd is square-free, deg(fa) < deg(fd), and # gcd(fa,", "with gcd(a, b) == 1, and G = [g1, ...,", "generate the space of those # constant families (c1, ...,", "k, deg(qi) < deg(Dt) t = DE.t if DE.case !=", "minimum number of rows. Mqq = Matrix([qq]) # A single", "Poly(DE.t, DE.t) elif case == 'tan': p = Poly(DE.t**2 +", "y in K(t) and c1, ..., cm in Const(K) such", "bd*a, DE.t) betad = alphad etaa, etad = frac_in(dcoeff, DE.t)", "+ 1, DE.t)) with DecrementLevel(DE): # We are guaranteed to", "is used both in solving parametric problems and in determining", "function for more information. \"\"\" from __future__ import print_function, division", "case) else: raise ValueError(\"case must be one of {'primitive', 'exp',", "== 1), return the tuple (A, B, GG, h) such", "updated to call bound_degree() # as per the docstring of", "None Q, v = Qv if Q.is_zero or v.is_zero: return", "0 and either D == d/dt or deg(b) > max(0,", "that constant. argterms = ([DE.extargs[i] for i in DE.indices('exp')] +", "Z = list(zip(*[gcdex_diophantine(b, a, qi) for qi in Q])) A", "/ i i / i --- = --. --- ---", "..., enm) hn = c.gcd(c.diff(DE.t)) a = hn b =", "in k(t), and a hyperexponential monomial theta over k(t), raises", "for some d1, ..., du in Const(k). # In that", "derivative of an element of k(t) if there exists b", "u]) u *= n terms = ([DE.T[i] for i in", "= A.nullspace() # V = [v1, ..., vu] where each", "Solutions p of a*Dp + b*p = Sum(ci*qi) correspond to", "+ wl[-v:] for wl in W]) # excise dj's. N", "part of the denominator. Given a derivation D on k[t]", "Ai.col_join(M.row_join(zeros(M.rows, ri))) Fi, hi = [None]*ri, [None]*ri # from eq.", "max([ri.degree() for ri in r]) M = Matrix(n + 1,", "- min(0, nb)) if not nb: # Possible cancellation. if", "== 'exp': wa, wd = derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True) with", "r[j].nth(i)) else: M = Matrix(0, m, []) # No constraints.", "the partial fraction expansion of gi. # M is a", "value in bd.items()] bd_real = sum(r for r in denom_real)", "min([order_at(Ga, p, DE.t) - order_at(Gd, p, DE.t) for Ga, Gd", "case == 'base': # TODO: we can use more efficient", "common_denom != n*m: # Verify exact division raise ValueError(\"Inexact division\")", "logarithmic derivative of a k(t)-radical. 
ans is a list of", "k with c1, ..., cm in Const(k) # if and", "(A, B, G, h) def prde_linear_constraints(a, b, G, DE): \"\"\"", "The solutions of the original equation are then # Sum(dj*fj,", "in k<t> of a*Dq + b*q = Sum(ci*Gi) correspond #", "reduce(lambda i, j: i.lcm(j), (ds,) + Es) # lcm(ds, es1,", "division from sympy.core import Dummy, ilcm, Add, Mul, Pow, S", "such that a, b, h in k[t], N is a", "i, j in ans]) == u. This is useful for", "this case? raise NotImplementedError(\"Nonelementary extensions not supported \" \"in the", "# if and only if M*Matrix([c1, ..., cm]) == 0,", "n), p**-n) return (A, B, G, h) def prde_linear_constraints(a, b,", "= A.gcd(B) a, b, g = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True)", "0: M = Matrix(d, m, lambda i, j: Q[j].nth(i +", "gd in G] return (a, b, a, N, (a*hn*fa).cancel(fd, include=True),", "Const_D(K) == Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i", "a k(t)-radical. b in k(t) can be written as the", "% case) nb = order_at(ba, p, DE.t) - order_at(bd, p,", "expansion of gi. # M is a matrix with m", "def parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None): \"\"\" Parametric logarithmic", "in k(t) with n, u != 0 such that n*f", "in V] # [f1, ..., fu] # # Solve the", "+ En) # lcm(dn, en1, ..., enm) hn = c.gcd(c.diff(DE.t))", "-(derivation(hji, DE) - b*hji) H += hi # in the", "remainder is zero # for c1, ..., cm in Const(k)", "[Pow(b, e*j) for b, e in dterms]))) const = cancel(fa.as_expr()/fd.as_expr()/Mul(*l)*Mul(*ld))", "Const(k). # In that case, solutions of # a*Dp +", "import Dummy, ilcm, Add, Mul, Pow, S from sympy.core.compatibility import", "Matrix([beta]) f = [(Mbeta*vj)[0] for vj in V] # [f1,", "to NotImplementedError. raise ValueError(\"The %s case is not supported in", "# cases. return A def is_deriv_k(fa, fd, DE): r\"\"\" Checks", "= max(0, -nb) pN = p**N pn = p**-n #", "b is in k, deg(qi) < deg(Dt) t = DE.t", "in q] if not b.is_zero and (DE.case == 'base' or", "include=True) # Our assumption here is that each monomial is", "a = 1. a = a.LC() b, q = b.quo_ground(a),", "[r1.nth(i) - c1*r2.nth(i) for i in range(z.degree(DE.t))] s = solve(eqs,", "Sum(ei*hi, (i, 1, m)), # where ei == ci (i", "Gds) d = Poly(d, field=True) Q = [(ga*(d).quo(gd)).div(d) for ga,", "a list of terms on the right hand side of", "V[0][0] # v = [-1, c1, ..., cm, d1, ...,", "either D == d/dt or deg(D) >= 2, returns h1,", "+ f == u. This is calculated by dividing the", "u = constant_system(lhs, rhs, DE) if not all(derivation(i, DE, basic=True).is_zero", "if and only if there are ri in QQ such", "extensions not supported \" \"in the structure theorems.\") E_part =", "Sum(dj*fj) where # fj = Sum(aji*betai). Mbeta = Matrix([beta]) f", "for i in range(n, -1, -1): if DE.case == 'exp':", "beta = 1, [0]*m while n >= 0: # and", "result, n, const) def is_log_deriv_k_t_radical_in_field(fa, fd, DE, case='auto', z=None): \"\"\"", "# Warning: This will NOT return correct results if cancel()", "1 + i]*h[i][0].as_expr()/h[i][1].as_expr() \\ for i in range(r)]) y_num, y_den", "with deg(a) > 0 and gcd(a, b) == 1, return", "1, and G = [g1, ..., gm] in k(t)^m, return", "if case == 'exp': wa, wd = derivation(DE.t, DE).cancel(Poly(DE.t, DE.t),", "used. 
The argument w == Dtheta/theta \"\"\" # TODO: finish", "+ n*a*Dp/p)*p**N, g1*p**(N - n), ..., gm*p**(N - n), p**-n)", "q, n, DE) elif ((b.is_zero or b.degree() < DE.d.degree() -", "a polynomial solution # only if the sum is in", "or returns a solution (n, m, v) of the equation", "correspond to # solutions alpha*p + Sum(ci*betai) of the initial", "lcm, cancel, sqf_list from sympy.polys.polymatrix import PolyMatrix as Matrix from", "field=True) for qi in Q] f, B = param_rischDE(ba, bd,", "a, b relatively prime a, b, q, r, n =", "len(G) q, (fa, fd) = weak_normalizer(fa, fd, DE) # Solutions", "(k = 1, ..., m + u + v) in", "Sum(fi*qi, (i, 1, m)), where f1, ..., fm are elements", "..., hr] in k(t)^r and a matrix A with m", "= [splitfactor(gd, DE) for _, gd in G] En, Es", "== 'exp': dcoeff = DE.d.quo(Poly(DE.t, DE.t)) with DecrementLevel(DE): # We", "in residueterms] m = common_denom//n if common_denom != n*m: #", "denom_real = [value if key[0] % 4 == 0 else", "# in k[t] if and only if p = Sum(ek*gk)", "Poly(n, DE.t)*a*derivation(p, DE).quo(p)*pN G = [(Ga*pN*pn).cancel(Gd, include=True) for Ga, Gd", "means that Df cannot be written as the logarithmic derivative", "= splitfactor(gd, DE) p = dn.gcd(en) h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t))) a", "list(zip(*roots)) or [[], []] # Note: this might be empty,", "then this will need to be updated to call bound_degree()", "TODO: we can use more efficient residue reduction from ratint()", "a bug. return None roots = [(i, i.real_roots()) for i,", "supported in this function.\" % case) else: raise ValueError(\"case must", "not have problems, # because case != 'base'. betaa, alphaa,", "there should be an option to continue # anyway, even", "= Da_i/a_i, for some a_i in C(x)(t_1, ..., t_i-1)* }", "heuristic handles most difficult # cases. return A def is_deriv_k(fa,", "an # expression in the constant field that is identically", "derivative of an element of k(t). a in k(t) is", "= max([qi.degree(DE.t) for qi in Q]) if d > 0:", "use the full method. # TODO: This could be implemented", "p**-n) return (A, B, G, h) def prde_linear_constraints(a, b, G,", "G, h) def prde_linear_constraints(a, b, G, DE): \"\"\" Parametric Risch", "of (a/d)*Dp + (b/d)*p = Sum(dj*rj) correspond to # solutions", "in k(t}. The corresponding solutions are # y = Sum(blk'*hk,", "if f is the logarithmic derivative of a k(t)-radical return", "return None return (Q*N, Q*M, v) def parametric_log_deriv(fa, fd, wa,", "== u. This is calculated by dividing the arguments of", "the result might potentially be wrong. raise NotImplementedError(\"Cannot work with", "the # roots of the resultant must be rational numbers.", "..., cm in Const(k) and q in k[t] of degree", "> B: return None c = lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC()) l =", "[r1, ..., rm], such that for any solution c1, ...,", "ar in Const(k) and # B*Matrix([c1, ..., cm, d1, ...,", "we can tell if they're not rational, like # log(2)/log(3).", "Parametric Risch Differential Equations parallel those for solving Risch Differential", "in ZZ, and b, q1, ..., qm in k[t] with", "example, both log(x) and log(2*x) == log(x) + log(2) satisfy", "Dt == a*t + b with for some a, b", "i, j in residueterms]) return (n, u) elif case ==", "in range(z.degree(DE.t))] s = solve(eqs, c1) if not s or", "and only if there are ri in QQ such that::", "0. V = A.nullspace() # V = [v1, ..., vu]", "\"\"\" # Compute Df/f dfa, dfd = (fd*derivation(fa, DE) -", "else: # Base case. 
Dy == 0 for all y", "h) def real_imag(ba, bd, gen): \"\"\" Helper function, to get", "of tuples such that Mul(*[i**j for i, j in ans])", "Matrix([ni[:] for ni in N]) # rows n1, ..., ns.", "DE) # A is a matrix with m columns and", "derivation D in k(t), f in k(t), and G =", "m)), where f1, ..., fm are elements of k, is", "careful with the sorts of expressions that # appear in", "def prde_spde(a, b, Q, n, DE): \"\"\" Special Polynomial Differential", "cumulating coefficient # and terms for the recovery of original", "[Poly(0, DE.t)]*m for N in range(n, -1, -1): # [n,", "DE.cases if i == 'primitive']) - set(DE.indices('log'))): raise NotImplementedError(\"Real version", "should be able to completely decide these # problems in", "K over C(x). Furthermore, because Const_D(K) == Const_D(C(x)) == C,", "approach will need to be used. The argument w ==", "solution of Ax == 0. Elements of k(t) are tuples", "of expressions that # appear in his integrand in the", "the # functions given when solving the parametric logarithmic #", "in Const(k) if and only if p = Sum(dj*hj, (j,", "the logarithmic derivative of a K-radical if and only if", "for i in u]) u *= n terms = ([DE.T[i]", "from sympy.integrals.risch import (gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel,", "== 0. # Dy + b*y = Sum(ci*qi) is solvable", "like sqrt(x**2) != x # and also sqrt(x**2 + 2*x", "denominator of the rational function. \"\"\" bd = bd.as_poly(gen).as_dict() ba", "in k[t]^m and d in k[t], return q = [q1,", "and p in k[t] of a*Dp + b*p == Sum(ci*gi,", "A, _ = constant_system(M, zeros(M.rows, 1), DE) # A is", "m = len(G) q, (fa, fd) = weak_normalizer(fa, fd, DE)", "has a solution p of degree <= n in k[t]", "pass them as indices to D (or T). E_args are", "B: return None c = lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC()) l = fd.monic().lcm(wd.monic())*Poly(c,", "prde_linear_constraints(a, b, G, DE): \"\"\" Parametric Risch Differential Equation -", "None else: # we can take any vector from V,", "never happen for the # functions given when solving the", "- derivation(ri, DE) for ri, zi in zip(R, Z)] R", "as per the docstring of this function (DE.case == 'other_linear').", "Differential Equations. The methods used for solving Parametric Risch Differential", "ZZ with n != 0. If this heuristic fails, the", "# derivative problem when integration elementary functions (see # Bronstein's", "ri in r]) M = Matrix(n + 1, m, lambda", "sum is Sum(ci*qi). ## Reduce number of constants at this", "k. # Sum(fi*qi, (i, 1, m)), where f1, ..., fm", "Q]): N = max([ri.degree(DE.t) for _, ri in Q]) M", "k = k0(t0) ba, bd = frac_in(b, t0, field=True) Q0", "ba, bd, G, DE, case='auto'): \"\"\" Parametric Risch Differential Equation", "== 'exp': # this re-checking can be avoided with DecrementLevel(DE):", "db = b.degree(DE.t) m = len(Q) H = [Poly(0, DE.t)]*m", "only if y = Sum(dj*hj, (j, 1, r)) where d1,", "part of a rational function evaluated at sqrt(-1) without actually", "entries aj1, ..., ajm in Const(k). # Sum(aji*gi) is in", "k[t]. if not V: # No non-trivial solution return [],", "the type of the derivation automatically. See also ======== is_log_deriv_k_t_radical,", "(i, 1, n)) \"\"\" fa, fd = fa*Poly(1/fd.LC(), DE.t), fd.monic()", "real part of the numerator ba[1] is the imaginary part", "non-Liouvillian, which for the transcendental case, implies that Dt ==", "function (DE.case == 'other_linear'). 
N = hn.degree(DE.t) + hs.degree(DE.t) +", "Add(*[Mul(i, j/n) for i, j in zip(argterms, u)])) return (ans,", "similar special_denom() in rde.py if case == 'auto': case =", "that n*b == Du/u. Either returns (ans, u, n, const)", "take any vector from V, we take V[0] c0 =", "u in k(t) with n, u != 0 such that", "reduce(ilcm, [i.as_numer_denom()[1] for i in u]) u *= n terms", "= len(q) if n < 0: # Only the trivial", "= constant_system(lhs, rhs, DE) if not all(derivation(i, DE, basic=True).is_zero for", "ba_real = sum(r for r in num_real) ba_imag = sum(r", "DE) - b*sitn if b.degree(DE.t) > 0: for i in", "more efficient residue reduction from ratint() if not fd.is_sqf or", "== 0 # There are no constraints on d1. #", "because log(2) is constant. Therefore, the term const is returned.", "== C, deg(Dt_i) == 1 when t_i is in E_K/C(x)", "vectors (bl1, ..., blm) generate the space of those #", "# and also sqrt(x**2 + 2*x + 1) != x", "= Sum(dj*fj, (j, 1, r) + Sum(ei*hi, (i, 1, m)),", "= [q1, ..., qm] in k[t]^m and a matrix M", "= A u *= DE.t**e elif case == 'primitive': with", "with coefficients in Const(k) such that if c1, ..., cm", "b + derivation(a, DE) Qq = [zi - derivation(ri, DE)", "j in residueterms]] + [n], S(1)) residueterms = [(i, j*common_denom)", "= b.quo_ground(a), [qi.quo_ground(a) for qi in q] if not b.is_zero", "(i, 1, m)). Because M has entries in k(t), and", "for i in DE.cases if i == 'tan'] or \\", "if not z.has(DE.t): # TODO: We treat this as 'no", "for the transcendental case, implies that Dt == a*t +", "and deg(Dt_i) == 0 when t_i is in L_K/C(x), implying", "(DE.case == 'other_linear'). N = hn.degree(DE.t) + hs.degree(DE.t) + max(0,", "DecrementLevel(DE): Qy = [frac_in(q.nth(i), DE.t, field=True) for q in Q]", "Df / i i / i --- = --. ---", "should really be done in this case? raise NotImplementedError(\"Nonelementary extensions", "the limited integration problem. Given a derivation D on k(t)", "the above are also nonlinear or Liouvillian, but if this", "Liouvillian cases if DE.case == 'primitive' or DE.case == 'exp':", "constant families (c1, ..., cm) for which a solution of", "only if p = Sum(ek*hk) where e1, ..., ev are", "== 'auto': case = DE.case if case == 'exp': wa,", "bound_degree) from sympy.integrals.risch import (gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation,", "at sqrt(-1) without actually evaluating it at sqrt(-1) Separates the", "(resp. gcd(a, t**2 + 1) == 1), return the tuple", "a Matrix with coefficients in C and v is a", "cases. return A def is_deriv_k(fa, fd, DE): r\"\"\" Checks if", "place Q = Q + Fi return (H, M) def", "Because Poly does not play well with Matrix yet, this", "if i == 'tan'] or \\ (set([i for i in", "+ u + v columns. A = -eye(m) for vj", "solutions of a parametric Risch differential equation. Given a derivation", "n, s = splitfactor(fd, DE) if not s.is_one: pass z", "{'primitive', 'exp', 'tan', \" \"'base', 'auto'}, not %s\" % case)", "(fa, fd) in f into constant # polynomials fa/fd in", "k[t]. 
\"\"\" m = len(G) q, (fa, fd) = weak_normalizer(fa,", "the given field not in some (possibly unspecified extension) and", "r * i Df / i i / i ---", "ilcm, Add, Mul, Pow, S from sympy.core.compatibility import reduce, range", "G])) # So far, all the above are also nonlinear", "y in k(t) with c1, ..., cm in Const(k) if", "i need not be a polynomial i, d = i.as_numer_denom()", "'tan', 'primitive'} for the hyperexponential, hypertangent, and primitive cases, respectively.", "(DE.case == 'base' or DE.d.degree() >= 2)): return prde_no_cancel_b_small(b, q,", "derivative in the base case if and only if #", "recursively transcendental if len(DE.exts) != len(DE.D): if [i for i", "only if M*Matrix([c1, ..., cm]) == 0, # in which", "coefficients will fall into this class). Furthermore, (I believe) this", "= -1 M = zeros(0, 2) else: dc = max([qi.degree(DE.t)", "case the quotient is Sum(fi*qqi). A, _ = constant_system(M, zeros(M.rows,", "d1, ..., du in Const(k). # In that case, #", "A, D in G] return (a, (ba, bd), G, h)", "residueterms], S(1)) u = Mul(*[Pow(i, j*n) for i, j in", "reduce(lambda i, j: i.lcm(j), Gds) d = Poly(d, field=True) Q", "field (K, D) with constant field C = Const(K), a", "vectors generating the space of linear relations between # c1,", "!= 'base'. betaa, alphaa, alphad = real_imag(ba, bd*a, DE.t) betad", "m + u + v) in Const(k). # The vectors", "for gia, gid in G] # a*Dp + b*p =", "all constant # Note: See comment in constant_system # Also", "y = Sum(ek*hk, (k, 1, v))/gamma. ## Build combined relation", "range(len(H)) for i in residues[j]] # TODO: finish writing this", "recognize_log_derivative(2*betaa, betad, DE): A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)", "that Df/f == Du, or None, which means that Df/f", "part is always computed, this function calls the more general", "that the special part is always computed, this function calls", "be Q + Fi taking its place Q = Q", "which case the heuristic failed, or returns None, in which", "- 1)/(N*DE.d.LC()) sitn = Poly(si*DE.t**N, DE.t) H[i] = H[i] +", "list(zip(*[gcdex_diophantine(b, a, qi) for qi in Q])) A = a", "solution exists, or returns a solution (n, m, v) of", "\"\"\" Parametric Risch Differential Equation - Normal part of the", "case the solutions are # y = d1*f1 for f1", "qqm] where qqi = qi.quo(d). # M is a matrix", "function (rational function # coefficients will fall into this class).", "a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of", "else: n = reduce(ilcm, [i.as_numer_denom()[1] for i in u]) u", "and E_K/C(x) = { i in {1, ..., n} such", "set of all indices of hyperexponential monomials of K over", "m+1] Asj = A[s, j] A.row_op(s, lambda r, jj: cancel(r", "parametric_log_deriv(betaa, betad, etaa, etad, DE) if A is not None", "n in k[t] with c1, ..., cm in Const(k) if", "if Q == 1: n = min(n, m) elif case", "a k(t)-radical. ans is a list of tuples such that", "is False Ax == u has no constant solution. This", "in k(t)* and n, m in ZZ with n !=", "fd.is_sqf or fa.degree() >= fd.degree(): # f is the logarithmic", "anyway, even if the result might potentially be wrong. 
raise", "on k[t] and f, g1, ..., gm in k(t) with", "k[t], n in ZZ, and b, q1, ..., qm in", "derivation D on k[t], n in ZZ, and b, q1,", "# A*Matrix([c1, ..., cm]) == 0 and # B*Matrix([c1, ...,", "k[t], a, b in k[t] relatively prime, and q =", "# I will have to verify, but I believe that", "K, returns the tuple (B, v, s), where B is", "useful for seeing exactly which elements of k(t) produce u.", "that for any f in K, Df/f is the derivative", "d with exact quotient Sum(aji*qqi). # Sum(ci*qi) is divisible by", "include=True) else: dfa, dfd = fa, fd # Our assumption", "r * Dt + \\ r * i / i", "# exp(f) will be the same as result up to", "== u. This is useful for seeing exactly which elements", "1, return (A, B, Q, R, n1), with Qq =", "actually evaluating it at sqrt(-1) Separates the even and odd", "is linear and non-Liouvillian, which for the transcendental case, implies", "E*exp(x) satisfy Dt == t. Therefore, the term const is", "= Matrix(N + 1, m, lambda i, j: Q[j][1].nth(i)) else:", "equation (i.e., gi in k(t)), and Q is a list", "else: raise ValueError(\"case must be one of {'primitive', 'exp', 'tan',", "+ f*y == Sum(ci*gi, (i, 1, m)), and to find", "for ri in r]) M = Matrix(n + 1, m,", "for qi in Q] f, B = param_rischDE(ba, bd, Q0,", "A temporary bound is set. Eventually, it will be removed.", "results if cancel() cannot reduce # an identically zero expression", "of a k(t)-radical. return None Q, v = Qv if", "A, u = constant_system(lhs, rhs, DE) if not all(derivation(i, DE,", "Dq + b*q == Sum(ci*qi, (i, 1, m)), then q", "# anyway, even if the result might potentially be wrong.", "j] A.row_op(s, lambda r, jj: cancel(r - Asj*Rm1[jj])) # u[s]", "m)) B = B.row_join(zeros(B.rows, m)) C = I.row_join(zeros(m, r)).row_join(-I) return", "i, j in ans]) # exp(f) will be the same", "-1): # [n, ..., 1] for i in range(m): si", "solution of Mx == 0, and p and the ci", "\"\"\" H = [] # Why use DecrementLevel? Below line", "solution p of degree <= n # in k[t] if", "can solve such problems over 'k' (not k[t]) if DE.case", "d) with a and d in k[t]. \"\"\" m =", "solution return [], eye(m) Mq = Matrix([q]) # A single", "# for c1, ..., cm in Const(k) if and only", "p**-n # This is 1/h A = a*pN B =", "= [(H[j][1].subs(z, i), i) for j in range(len(H)) for i", "return [], A if a.is_ground: # Normalization: a = 1.", "prime a, b, q, r, n = prde_spde(a, b, q,", "= [(c*A).cancel(D, include=True) for A, D in G] return (a,", "aj1, ..., ajm in Const(k). # Sum(aji*gi) is in k[t]", "be a polynomial i, d = i.as_numer_denom() icoeff, iterms =", "arguments of the hyperexponentials indexed by E_K (i.e., if i", "c0 = V[0][0] # v = [-1, c1, ..., cm,", "GG = [gg1, ..., ggm] in k(t)^m, and for any", "= Const(k0) # such that Dy0 + b*y0 = Sum(ci*qi,", "Qv if Q.is_zero or v.is_zero: return None return (Q*N, Q*M,", "\"'base', 'auto'}, not %s\" % case) common_denom = reduce(ilcm, [i.as_numer_denom()[1]", "dr] v = V[0]/(-c0) r = len(h) m = len(v)", "\\ r * i Df / i i / i", "k[t], an integer n, and a, b, q1, ..., qm", "this indicates a bug. return None roots = [(i, i.real_roots())", "M = poly_linear_constraints(q, d) # qq = [qq1, ..., qqm]", "m columns an entries in k. # Sum(fi*qi, (i, 1,", "of k(t) produce u. This function uses the structure theorem", "= reduce(lambda i, j: i.lcm(j), (ds,) + Es) # lcm(ds,", "Q])) A = a B = b + derivation(a, DE)", "h, A def param_rischDE(fa, fd, G, DE): \"\"\" Solve a", "= 1 and any d1 in Const(k) = k. f", "satisfy a*Dp + b*p == Sum(ci*qi, (i, 1, m)). Because", "equation with ci = Sum(dj*aji). 
try: # We try n=5.", "of K over C(x)). If K is an elementary extension", "docstring of this function (DE.case == 'other_linear'). N = hn.degree(DE.t)", "and only if p = Sum(ek*hk) where e1, ..., ev", "a*Dp + b*p = Sum(ci*qi, (i, 1, m)) has a", "= A.col_join(zeros(B.rows, m).row_join(B)) ## Eliminate d1, ..., du. W =", "'primitive': with DecrementLevel(DE): ba, bd = frac_in(b, DE.t, field=True) for", "the term const is returned. const is such that log(const)", "with DecrementLevel(DE): t0 = DE.t # k = k0(t0) ba,", "i in range(B + 1, C + 1)] s =", "[p1, ..., pm] in k[t]^m and d in k[t], return", "for vj in V] # [r1, ..., ru] # Solutions", "j) for i, j in ans]) # exp(f) will be", "hypertangent) case, given a derivation D on k[t] and a", "b*q == Sum(ci*gi, (i, 1, m)), p = (q -", "f had better be 0 in that case. n =", "if [i for i in DE.cases if i == 'tan']", "matrix with # entries aj1, ..., ajm in Const(k). #", "rm], such that for any solution c1, ..., cm in", "d. qq, M = poly_linear_constraints(q, d) # qq = [qq1,", "Mqq = Matrix([qq]) # A single row. r = [(Mqq*vj)[0]", "-sum([v[m + 1 + i]*h[i][0].as_expr()/h[i][1].as_expr() \\ for i in range(r)])", "gd, DE.t) for ga, gd in G])) # So far,", "order_at_oo, weak_normalizer, bound_degree) from sympy.integrals.risch import (gcdex_diophantine, frac_in, derivation, residue_reduce,", "Eventually, it will be removed. # the currently added test", "= alphad etaa, etad = frac_in(dcoeff, DE.t) if recognize_log_derivative(2*betaa, betad,", "M, N = s[c1].as_numer_denom() nfmwa = N*fa*wd - M*wa*fd nfmwd", "1) C = max(p.degree(DE.t), q.degree(DE.t)) if q.degree(DE.t) > B: eqs", "include=True) for ga, gd in G] a, (ba, bd), G,", "dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2, include=True) else: dfa,", "sitn Q[i] = Q[i] - derivation(sitn, DE) - b*sitn if", "says that for any f in K, Df/f is the", "is the derivative of a element of K if and", "= [] ld = [] for i, j in zip(argterms,", "these are supported by the structure theorems, change to NotImplementedError.", "This is 1/h A = a*pN B = ba*pN.quo(bd) +", "p = dn.gcd(en) h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t))) a = dn*h c", "These are equal to alpha*p + Sum(dj*fj) where # fj", "H, b = residue_reduce(fa, fd, DE, z=z) if not b:", "= Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj) # where rj =", "constants. Given a derivation D on k[t], a, b, in", "# Possible cancellation. if case == 'exp': dcoeff = DE.d.quo(Poly(DE.t,", "== 1 else -value if key[0] % 4 == 3", "of Basic expressions. 
\"\"\" m = len(G) Gns, Gds =", "None # Note: if residueterms = [], returns (1, 1)", "- set(DE.indices('log'))): raise NotImplementedError(\"Real version of the structure \" \"theorems", "[alpha*gk for gk in g] # Build combined relation matrix.", "= [r1.nth(i) - c1*r2.nth(i) for i in range(z.degree(DE.t))] s =", "too worrisome, because the heuristic handles most difficult # cases.", "in Const(k) and q in k<t> of a*Dq + b*q", "this algorithm will need to be extended to handle them.", "in ans]) argterms = ([DE.T[i] for i in DE.indices('exp')] +", "2 and b.degree() == DE.d.degree() - 1 and n >", "M, N = s[c1].as_numer_denom() nfmwa = N.as_poly(DE.t)*fa*wd - M.as_poly(DE.t)*wa*fd nfmwd", "def prde_no_cancel_b_small(b, Q, n, DE): \"\"\" Parametric Poly Risch Differential", "n1), with Qq = [q1, ..., qm] and R =", "DE problem Fa = Poly(0, DE.t) Fd = Poly(1, DE.t)", "approach, which says that for any f in K, Df/f", "k(t)-radical, then all the # roots of the resultant must", "return None n, e, u = A u *= DE.t**e", "taking its place Q = Q + Fi return (H,", "B*r == Sum(ci*ggi, (i, 1, m)). For case == 'primitive',", "the log of that constant. argterms = ([DE.extargs[i] for i", "def real_imag(ba, bd, gen): \"\"\" Helper function, to get the", "from sympy.polys import Poly, lcm, cancel, sqf_list from sympy.polys.polymatrix import", "a*Dp + b*p = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) # are the", "the initial # equation. These are equal to alpha*p +", "cancellation: deg(b) small enough. Given a derivation D on k[t],", "m) elif case == 'tan': dcoeff = DE.d.quo(Poly(DE.t**2 + 1,", "# [(a, i), ...], where i*log(a) is a term in", "all(i.is_Rational for i in u): # TODO: But maybe we", "DE) # g = [g1, ..., gv] in k[t]^v and", "cm in Const(k) if and only if y = Sum(dj*hj,", "and also sqrt(x**2 + 2*x + 1) != x +", "(I believe) this # problem will only crop up if", "that an integral is nonelementary (such as # risch_integrate(exp((sin(x)**2 +", "alpha*Sum(ek*gk, (k, 1, v)). # Collect solution components. h =", "= Sum(ek*hk, (k, 1, v))/gamma. ## Build combined relation matrix", "is the denominator of the rational function. \"\"\" bd =", "if key[0] % 4 == 0 else -value if key[0]", "that Dt == a*t + b with for some a,", "t f i in L i in E i K/C(x)", "C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly", "is a derivative of an element of K or the", "..., hr] in k[t]^r and a matrix A with m", "the initial equation with ci = Sum(dj*aji). try: # We", "nature, be computed recursively using this same function. Therefore, it", "are then # Sum(dj*fj, (j, 1, u)) + alpha*Sum(ek*gk, (k,", "will have to verify, but I believe that the answer", "of the denominator. case is one of {'exp', 'tan', 'primitive'}", "= reduce(ilcm, [i.as_numer_denom()[1] for i in u]) u *= n", "of u are not all constant # Note: See comment", "= (fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2, include=True) else: dfa, dfd", "+ DE.d.degree(DE.t) - 1)/(N*DE.d.LC()) sitn = Poly(si*DE.t**N, DE.t) H[i] =", "= Ya*Poly(1/Yd.LC(), DE.t), Yd.monic() return Y, C def parametric_log_deriv_heu(fa, fd,", "Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1, m)).", "A, _ = constant_system(M, zeros(M.rows, 1), DE) return [], A", "Merge this with the very similar special_denom() in rde.py if", "V: A = A.row_join(vj) A = A.row_join(zeros(m, len(h))) A =", "can be written as the logarithmic derivative of a k(t)-radical.", "= Q[i] - derivation(si, DE) - b*si if all(qi.is_zero for", "original equation. 
gamma = q G = [(q*ga).cancel(gd, include=True) for", "i, j: q[j].nth(i)) A, _ = constant_system(M, zeros(M.rows, 1), DE)", "*= hn A, B, G, hs = prde_special_denom(a, ba, bd,", "Matrix A, and a vector (Matrix) u with coefficients in", "0, and p and the ci satisfy a*Dp + b*p", "and p and the ci satisfy a*Dp + b*p ==", "nfmwd, DE) if Qv is None: # (N*f - M*w)", "integration # variable (the structure theorems should be able to", "= (fd*derivation(fa, DE) - fa*derivation(fd, DE)), fd*fa dfa, dfd =", "fd, G, DE): \"\"\" Simpler version of step 1 &", "j in roots): # If f is the logarithmic derivative", "the ci satisfy a*Dp + b*p == Sum(ci*qi, (i, 1,", "term in the log-part of the integral # of f", "+ [DE.T[i] for i in DE.indices('log')]) ans = list(zip(terms, u))", "c1, ..., cm in k, is divisible by d if", "only if p = Sum(dj*hj, (j, 1, r)) where d1,", "cancel(fa.as_expr()/fd.as_expr()/Mul(*l)*Mul(*ld)) return (ans, result, const) def is_log_deriv_k_t_radical(fa, fd, DE, Df=True):", "p of degree <= n in k[t] with c1, ...,", "1) in k, sqrt(-1) not in k), a != 0,", "!= x # and also sqrt(x**2 + 2*x + 1)", "Q): dc = -1 M = zeros(0, 2) else: dc", "that no solution exists, or returns a solution (n, m,", "of gi. # M is a matrix with m columns", "== 'primitive' or DE.case == 'exp': return prde_cancel_liouvillian(b, q, n,", "as the logarithmic derivative of a k(t)-radical. case is one", "k(t) can be written as the logarithmic derivative of a", "elif case == 'primitive': with DecrementLevel(DE): pa, pd = frac_in(p,", "Df. --- --- t i in L i in E", "this raise NotImplementedError V = [(-a*hn*ga).cancel(gd, include=True) for ga, gd", "A.col_join(zeros(B.rows, m).row_join(B)) ## Eliminate d1, ..., du. W = A.nullspace()", "G, DE): \"\"\" Parametric Risch Differential Equation - Normal part", "in k (resp. Dt/(t**2 + 1) in k, sqrt(-1) not", "M has entries in k(t), and because Matrix doesn't play", "1 # Issue 10798: i need not be a polynomial", "to determine the type of the derivation automatically. See also", "None: # (N*f - M*w) is not the logarithmic derivative", "are Basic expressions. \"\"\" if not A: return A, u", "a*Dp + b*p == Sum(ci*gi, (i, 1, m)), (c1, ...,", "Build combined relation matrix with m + u + v", "a limitation in computer algebra in general, and implicit #", "is a solution of Mx = 0, in which case", "+ Sum(ci*wi, (i, 1, n)) \"\"\" fa, fd = fa*Poly(1/fd.LC(),", "a_i in C(x)(t_1, ..., t_i-1)* } (i.e., the set of", "h = pn # (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N -", "p = Poly(DE.t, DE.t) elif case == 'tan': p =", "n)) \"\"\" fa, fd = fa*Poly(1/fd.LC(), DE.t), fd.monic() # interpretting", "c.gcd(c.diff(DE.t)) a = hn b = -derivation(hn, DE) N =", "DE.t, field=True) for i in range(n, -1, -1): if DE.case", "elif case in ['primitive', 'base']: B = ba.quo(bd) return (a,", "Poly(d, field=True) Q = [(ga*(d).quo(gd)).div(d) for ga, gd in G]", "DE.case != 'base': with DecrementLevel(DE): t0 = DE.t # k", "v) are column # vectors generating the space of linear", "= [n1, ..., ns] where the ni in Const(k)^(m +", "Checks if f can be written as the logarithmic derivative", "fi = [Poly(fa.as_expr()/fd.as_expr(), DE.t, field=True) for fa, fd in fi]", "= [f1, ..., fr] in k^r and B is a", "the cardinality of L_K/C(x) U E_K/C(x) is exactly the transcendence", "DE.case in ['base', 'primitive', 'exp', 'tan']: hs = reduce(lambda i,", "prove that an integral is nonelementary (such as # risch_integrate(exp((sin(x)**2", "if case == 'exp': p = Poly(DE.t, DE.t) elif case", "# No constraints, return the empty matrix. 
qs, _ =", "c. return None M, N = s[c1].as_numer_denom() nfmwa = N.as_poly(DE.t)*fa*wd", "in k[t]^m and a matrix M with entries in k", "is the derivative of an element of k(t) if there", "# problem will only crop up if the integral explicitly", "M def constant_system(A, u, DE): \"\"\" Generate a system for", "an integer n, and a, b, q1, ..., qm in", "ri in r]): n = max([ri.degree() for ri in r])", "for i, j in roots): # If f is the", "f*y == Sum(ci*gi, (i, 1, m)), and to find such", "nb)) if not nb: # Possible cancellation. if case ==", "const = cancel(fa.as_expr()/fd.as_expr()/Mul(*l)*Mul(*ld)) return (ans, result, const) def is_log_deriv_k_t_radical(fa, fd,", "= cancel(u**m*Mul(*[Pow(i, j) for i, j in residueterms])) return (common_denom,", "monomial is recursively transcendental if len(DE.exts) != len(DE.D): if [i", "= N*fa*wd - M*wa*fd nfmwd = fd*wd Qv = is_log_deriv_k_t_radical_in_field(N*fa*wd", "zip(*[pi.div(d) for pi in p]) if not all([ri.is_zero for ri", "(such as # risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x). #", "else: raise ValueError(\"case must be one of {'exp', 'tan', 'primitive',", "derivation(ri, DE) for ri, zi in zip(R, Z)] R =", "this as 'no solution', until the structure # theorem version", "V) def limited_integrate(fa, fd, G, DE): \"\"\" Solves the limited", "a non-negative integer, g in k(t), V == [v1, ...,", "DE.t)) with DecrementLevel(DE): # We are guaranteed to not have", "for key, value in bd.items()] denom_imag = [value if key[0]", "> B, no solution for c. return None M, N", "D == d/dt or deg(D) >= 2, returns h1, ...,", "in Const(k) = k. f = [Poly(1, t, field=True)] #", "correct results if cancel() cannot reduce # an identically zero", "A = A.row_join(zeros(m, len(h))) A = A.col_join(zeros(B.rows, m).row_join(B)) ## Eliminate", "1, m)). For case == 'primitive', k<t> == k[t], so", "fd, G, DE): \"\"\" Solve a Parametric Risch Differential Equation:", "for i in DE.cases if i == 'primitive']) - set(DE.indices('log'))):", "= fa, fd # Our assumption here is that each", "Q, s, z = A # TODO: Add test if", "m)).col_join(c.row_join(-c)) return (H, A) # else: b is in k,", "cm in Const(k) # if and only y0 = Sum(dj*fj,", "determine if there exist y in K(t) and c1, ...,", "[] for i, j in zip(argterms, u): # We need", "but cannot # be reduced to such by cancel(). Therefore,", "C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some a_i", "DE): \"\"\"Polynomial solutions of a parametric Risch differential equation. Given", "and a hyperexponential monomial theta over k(t), raises either NotImplementedError,", "a, N, (a*hn*fa).cancel(fd, include=True), V) def limited_integrate(fa, fd, G, DE):", "return [], eye(m) # Could return A, but this has", "min(0, nc - min(0, nb)) if not nb: # Possible", "a*Dp + b*p = Sum(ci*gi) may have a polynomial solution", "m columns. r = len(f) I = eye(m) A =", "..., t_i-1) and Dt_i/t_i = Da_i, for some a_i in", "0] if not V: return None else: # we can", "of logarithmic monomials of K over C(x)), and E_K/C(x) =", "u = Au[:, :-1], Au[:, -1] for j in range(A.cols):", "is nonelementary (such as # risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2),", "and g1, ..., gm in k(t) with Dt/t in k", "subtracting the arguments of one exponential from the other. Therefore,", "4 == 1 else -value if key[0] % 4 ==", "likely this indicates a bug. return None roots = [(i,", "['base', 'primitive', 'exp', 'tan']: hs = reduce(lambda i, j: i.lcm(j),", "fu] # # Solve the reduced equation recursively. 
# g,", "_ = constant_system(M, zeros(M.rows, 1), DE) # M is a", "Dz + f*z = q*Sum(ci*Gi) # correspond to solutions y", "and hypertangent cases, respectively. If case is 'auto', it will", "as u up to a additive constant. This is because", "is an elementary extension over C(x), then the cardinality of", "S1irr = Sirr, but there could be # others, and", "len(f) I = eye(m) A = A.row_join(zeros(A.rows, r + m))", "DE.t) H[i] = H[i] + si Q[i] = Q[i] -", "DE) # Solutions q in k<t> of a*Dq + b*q", "such that Dy + f*y == Sum(ci*gi, (i, 1, m)),", "m columns and entries in k. # Sum(fi*gi, (i, 1,", "if not all(derivation(i, DE, basic=True).is_zero for i in u) or", "elements of k(t) produce u. This function uses the structure", "= A[s, j] A.row_op(s, lambda r, jj: cancel(r - Asj*Rm1[jj]))", "!= x + 1 # Issue 10798: i need not", "- dn*derivation(h, DE)*fd ba, bd = ba.cancel(fd, include=True) G =", "len(p) q, r = zip(*[pi.div(d) for pi in p]) if", "DE) - b*sitn if all(qi.is_zero for qi in Q): dc", "if A is None: return None n, u = A", "from sympy.solvers import solve def prde_normal_denom(fa, fd, G, DE): \"\"\"", "of Mx = 0, in which case the quotient is", "d1, ..., dr]) == 0. # Transform fractions (fa, fd)", "the quotient is Sum(fi*qqi). A, _ = constant_system(M, zeros(M.rows, 1),", "A[i, :] # Rm+1; m = A.rows Rm1 = Ri.applyfunc(lambda", "in ba.items()] ba_real = sum(r for r in num_real) ba_imag", "Solutions p in k[t] of A*Dp + B*p = Sum(ci*Gi)", "DE, case='auto', z=None): \"\"\" Checks if f can be written", "only if the sum is divisible by d. qq, M", "1, u)) + alpha*Sum(ek*gk, (k, 1, v)). # Collect solution", "Const(k) and y in k(t) of Dy + f*y ==", "in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all", "NotImplementedError. raise ValueError(\"The %s case is not supported in this", "dr ar in Const(k) and # B*Matrix([c1, ..., cm, d1,", "# It isn't too worrisome, because the heuristic handles most", "def prde_special_denom(a, ba, bd, G, DE, case='auto'): \"\"\" Parametric Risch", "qi in k[t]). See the docstring of each function for", "DE) # Solutions p in k[t] of A*Dp + B*p", "k(t) with n, u != 0 such that n*b ==", "D (or T). L_args are the arguments of the logarithms", "gamma *= hn A, B, G, hs = prde_special_denom(a, ba,", "in k(t)^m, and for any solution v in k(t), c1,", "\"\"\" Solves the limited integration problem: f = Dv +", "..., gn in k(t), return (a, b, h, N, g,", "z or Dummy('z') H, b = residue_reduce(fa, fd, DE, z=z)", "a, b, q, r, n = prde_spde(a, b, q, n,", "more information. The Parametric Risch Differential Equation problem is, given", "arguments of the logarithmic terms in L_args. To handle the", "into constant # polynomials fa/fd in k[t]. # (Is there", "# TODO: finish writing this and write tests c1 =", "Poly does not play well with Matrix yet, this algorithm", "one logarithm from the other. Therefore, it is necessary to", "in k(t)^m, return h = [h1, ..., hr] in k(t)^r", "small enough. Given a derivation D on k[t], n in", "Df, not f, use is_log_deriv_k_t_radical_in_field(). See also ======== is_log_deriv_k_t_radical_in_field, is_deriv_k", "Sum(Sum(dj*aji)*betai) of the initial # equation. These are equal to", "= N.as_poly(DE.t)*fa*wd - M.as_poly(DE.t)*wa*fd nfmwd = fd*wd Qv = is_log_deriv_k_t_radical_in_field(nfmwa,", "j in residueterms]) return (n, u) elif case == 'tan':", "Furthermore, it will automatically call bound_degree() when t is linear", "dfa, dfd = dfa.cancel(dfd, include=True) # Our assumption here is", "= [qq1, ..., qqm] where qqi = qi.quo(d). 
# M", "functions given when solving the parametric logarithmic # derivative problem", "fm]) == 0, # in which case the quotient is", "in K, returns the tuple (B, v, s), where B", "= A.row_join(zeros(m, len(h))) A = A.col_join(zeros(B.rows, m).row_join(B)) ## Eliminate d1,", "def param_poly_rischDE(a, b, q, n, DE): \"\"\"Polynomial solutions of a", "cm in Const(k) # if and only if M*Matrix([c1, ...,", "in residues[j]] # TODO: finish writing this and write tests", "return None u1, r1 = (fa*l.quo(fd)).div(z) # (l*f).div(z) u2, r2", "where d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm,", "a matrix A with m + r columns and entries", "h = [h1, ..., hv] in k[t]^v and and B", "will be the same as u up to a additive", "= Sum(dj*rj) correspond to solutions # y = p/gamma of", "Heuristic failed, we have to use the full method. #", "- fa*derivation(fd, DE)).cancel(fd**2, include=True) else: dfa, dfd = fa, fd", "> 0) in Sum(ci*qi) must be zero. d = max([qi.degree(DE.t)", "!= 'base': with DecrementLevel(DE): t0 = DE.t # k =", "is in k<t>, and p and the ci satisfy a*Dp", "exponential from the other. Therefore, it is necessary to pass", "2*x + 1) != x + 1 # Issue 10798:", "% case) else: raise ValueError(\"case must be one of {'primitive',", "to not have problems, # because case != 'base'. betaa,", "is_log_deriv_k_t_radical \"\"\" # Compute Df/f dfa, dfd = (fd*derivation(fa, DE)", "use is_deriv_k_in_field(). See also ======== is_log_deriv_k_t_radical_in_field, is_log_deriv_k_t_radical \"\"\" # Compute", "is used to indicate that. f in k(t) can be", "== 3 else 0 for key, value in ba.items()] ba_real", "0 for all y in k and b == 0.", "q] if not b.is_zero and (DE.case == 'base' or b.degree()", "C(x). Furthermore, because Const_D(K) == Const_D(C(x)) == C, deg(Dt_i) ==", "0, # in which case the sum is equal to", "list of tuples of factions of the terms on the", "in k(t), c1, ..., cm in C of f ==", "all matrix entries are Basic expressions. \"\"\" if not A:", "= -sum([v[m + 1 + i]*h[i][0].as_expr()/h[i][1].as_expr() \\ for i in", "not all(derivation(i, DE, basic=True).is_zero for i in u) or not", "and only if y = Sum(dj*hj, (j, 1, r)) where", "not b.is_zero and (DE.case == 'base' or b.degree() > max(0,", "the tuple (B, v, s), where B is a Matrix", "relation matrix with m + u + v columns. A", "f = Dv + Sum(ci*wi, (i, 1, n)) \"\"\" fa,", "Given a derivation D on k[t], an integer n, and", "DE.t) betad = alphad etaa, etad = frac_in(dcoeff, DE.t) if", "= A.col_join(zeros(B.rows, m).row_join(B)) return h, A def param_rischDE(fa, fd, G,", "as monomials. For example, both log(x) and log(2*x) == log(x)", "m)), then q = Sum(dj*hj, (j, 1, r)), where d1,", "# TODO: implement this raise NotImplementedError V = [(-a*hn*ga).cancel(gd, include=True)", "u. This is useful for seeing exactly what elements of", "If K is an elementary extension over C(x), then the", "of this function (DE.case == 'other_linear'). N = hn.degree(DE.t) +", "need not be a polynomial i, d = i.as_numer_denom() icoeff,", "number of constants at this point V = M.nullspace() #", "sympy.core.compatibility import reduce, range from sympy.integrals.rde import (order_at, order_at_oo, weak_normalizer,", "== 'primitive': with DecrementLevel(DE): ba, bd = frac_in(b, DE.t, field=True)", "Basic expressions. 
\"\"\" m = len(G) Gns, Gds = list(zip(*G))", "and either D == d/dt or deg(D) >= 2, returns", "Therefore, it is required to pass them as indices to", "(fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2, include=True) else: dfa, dfd =", "j: i.lcm(j), (dn,) + En) # lcm(dn, en1, ..., enm)", "is the computability of the # constant field (actually, this", "in ['primitive', 'base']: B = ba.quo(bd) return (a, B, G,", "not supported in this function.\" % case) else: raise ValueError(\"case", "sum(r for r in num_imag) ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen),", "for _, j in residueterms]] + [n], S(1)) residueterms =", "Solutions of a*Dp + b*p = Sum(dj*rj) correspond to solutions", "derivation D on k[t], a, b, in k[t] with gcd(a,", "if and only y0 = Sum(dj*fj, (j, 1, r)) where", "of the initial # equation. These are equal to alpha*p", "k(t) are tuples (a, d) with a and d in", "== q*h in k[t] satisfies A*Dr + B*r == Sum(ci*ggi,", "= hji # building up Sum(djn*(D(fjn*t^n) - b*fjnt^n)) Fi[j] =", "rde.py for more information. The Parametric Risch Differential Equation problem", "speed bottleneck from # calling some more complex simplification function", "k[t], so it returns (a, b, G, 1) in this", "the final answer u such that n*f == Du/u. exp(f)", "extended to handle them. if DE.case in ['base', 'primitive', 'exp',", "for f1 = 1 and any d1 in Const(k) =", "case is one of {'primitive', 'exp', 'tan', 'auto'} for the", "frac_in((wa, wd), DE.t) A = parametric_log_deriv(pa, pd, wa, wd, DE)", "terms = ([DE.T[i] for i in DE.indices('exp')] + [DE.extargs[i] for", "ri))) Fi, hi = [None]*ri, [None]*ri # from eq. on", "is_deriv_k_in_field(). See also ======== is_log_deriv_k_t_radical_in_field, is_log_deriv_k_t_radical \"\"\" # Compute Df/f", "that exp(const)*f == u. This is calculated by subtracting the", "M with entries in k such that Sum(ci*pi, (i, 1,", "in particular that E_K/C(x) and L_K/C(x) are disjoint. The sets", "if len(DE.exts) != len(DE.D): if [i for i in DE.cases", "any vector from V, we take V[0] c0 = V[0][0]", "try: A = parametric_log_deriv_heu(fa, fd, wa, wd, DE) # except", "as possible cumulating coefficient # and terms for the recovery", "gcd(fa, fd) == 1. The last condition is handled by", "# changes, then this will need to be updated to", "where the ni in Const(k)^(m + v) are column #", "Matrix(0, m, []) # Solutions of the original equation are", "es = splitfactor(gd, DE) p = dn.gcd(en) h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t)))", "in k(t)^m, and for any solution c1, ..., cm in", "d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1,", "doesn't play well with Poly, M will be a Matrix", "Sum(ci*ggi, (i, 1, m)). For case == 'primitive', k<t> ==", "= A.row_join(u) Au = Au.rref(simplify=cancel, normalize_last=False)[0] # Warning: This will", "here is that each monomial is recursively transcendental if len(DE.exts)", "# f = fa/fd, fd is square-free, deg(fa) < deg(fd),", "= a B = b + derivation(a, DE) Qq =", "== 1/x, because log(2) is constant. 
Therefore, the term const", "Fd, G, DE) V = A.nullspace() V = [v for", "# TODO: Add test if Q == 1: n =", "= max([qi.degree(DE.t) for qi in Q]) M = Matrix(dc +", "in V] # [r1, ..., ru] # Solutions of a*Dp", "t_i-1) and Dt_i/t_i = Da_i, for some a_i in C(x)(t_1,", "m)), for c1, ..., cm in k, is divisible by", "the cases where we know that S1irr = Sirr, but", "> 0 and gcd(a, b) == 1, return (A, B,", "# Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) ==", "the correctness of the Risch Algorithm is the computability of", "y in k(t) of Dy + f*y == Sum(ci*gi, (i,", "of a k(t)-radical. ans is a list of tuples such", "with # entries blk (k = 1, ..., m +", "q = p/hs of the previous equation. gamma *= hs", "Parametric Risch Differential Equation - Generate linear constraints on the", "algebra in general, and implicit # in the correctness of", "in K, Df/f is the derivative of a element of", "Matrix([E_part + L_part]) rhs = Matrix([dfa.as_expr()/dfd.as_expr()]) A, u = constant_system(lhs,", "QQ such that:: --- --- Dt \\ r * Dt", "def prde_normal_denom(fa, fd, G, DE): \"\"\" Parametric Risch Differential Equation", "indices to D (or T). L_args are the arguments of", "satisfy a*Dp + b*p == g + Sum(ci*vi, (i, 1,", "disjoint. The sets L_K/C(x) and E_K/C(x) must, by their nature,", "return None return (Q*N, Q*M, v) if p.degree(DE.t) > B:", "cases, respectively. For the hyperexponential (resp. hypertangent) case, given a", "a polynomial if and only if M*Matrix([f1, ..., fm]) ==", "= reduce(lambda i, j: i.lcm(j), (dn,) + En) # lcm(dn,", "# [n, ..., 0] for i in range(m): si =", "+ v) in Const(k). # The vectors (bl1, ..., blm)", "be rational numbers. return None # [(a, i), ...], where", "to pass them as indices to D (or T). E_args", "(set([i for i in DE.cases if i == 'primitive']) -", "D on k[t] and a in k[t], b in k<t>,", "is a non-negative integer, g in k(t), V == [v1,", "..., ajm in Const(k). # Sum(aji*gi) is in k[t] and", "q/hn of the weakly normalized equation. gamma *= hn A,", "G]) n = min(0, nc - min(0, nb)) if not", "is_log_deriv_k_t_radical, is_deriv_k \"\"\" fa, fd = fa.cancel(fd, include=True) # f", "case. Dy == 0 for all y in k and", "solution # only if the sum is in k[t]. q,", "- b*hji) H += hi # in the next loop", "+ sitn Q[i] = Q[i] - derivation(sitn, DE) - b*sitn", "= [value if key[0] % 4 == 1 else -value", "where f1, ..., fm are elements of k, is #", "# building up Sum(djn*(D(fjn*t^n) - b*fjnt^n)) Fi[j] = -(derivation(hji, DE)", "# If the elements of u are not all constant", "for i in u) or not A: # If the", "which says that for any f in K, Df is", "satisfy deg(q) <= n and Dq + b*q == Sum(ci*qi,", "# risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x). # But this", "version of parametric_log_deriv is implemented. return None u1, r1 =", "B, GG, h) such that A, B, h in k[t],", "(a, b, G, 1) in this case. \"\"\" # TODO:", "1), return the tuple (A, B, GG, h) such that", "DE)).cancel(fd**2, include=True) else: dfa, dfd = fa, fd # Our", "Matrix([wl[:m] + wl[-v:] for wl in W]) # excise dj's.", "include=True) for gia, gid in G] # a*Dp + b*p", "== Sum(ci*Gi, (i, 1, m)). Given a derivation D in", "== Sum(ci*gi, (i, 1, m)), q == y*h in k<t>", "# Sum(fi*gi, (i, 1, m)), where f1, ..., fm are", "only crop up if the integral explicitly contains an #", "in k(t), and because Matrix doesn't play well with Poly,", "failed, we have to use the full method. # TODO:", "play well with Matrix yet, this algorithm assumes that all", "cm, d1, ..., dr) is a solution of Ax ==", "u such that Df/f == Du. 
log(f) will be the", "(fd*derivation(fa, DE) - fa*derivation(fd, DE)), fd*fa dfa, dfd = dfa.cancel(dfd,", "return Q = [q1, ..., qm] in k[t]^m and a", "'exp': return prde_cancel_liouvillian(b, q, n, DE) else: raise NotImplementedError(\"non-linear and", "r = len(f) I = eye(m) A = A.row_join(zeros(A.rows, r", "there are ri in QQ such that:: --- --- Dt", "n=5, and much longer with large n's. n = 5", "DE.t) for ga, gd in G])) # So far, all", "in zip(R, Z)] R = list(R) n1 = n -", "+ 1]) y = -sum([v[m + 1 + i]*h[i][0].as_expr()/h[i][1].as_expr() \\", "even and odd power terms by checking the degree of", "# calling some more complex simplification function (rational function #", "yet implemented.\") # TODO: What should really be done in", "+ bd_imag*bd_imag).as_poly(gen) return (ba[0], ba[1], bd) def prde_special_denom(a, ba, bd,", "Sum(ci*gi, (i, 1, m)), r == q*h in k[t] satisfies", "while n >= 0: # and a, b relatively prime", "DE, parametric=True) except NotImplementedError: # A temporary bound is set.", "(i = 1, ..., m) are then y = Sum(ek*hk,", "partial fraction expansion of gi. # M is a matrix", "def poly_linear_constraints(p, d): \"\"\" Given p = [p1, ..., pm]", "f i in L i in E i K/C(x) K/C(x)", "in C(x)(t_1, ..., t_i-1) } (i.e., the set of all", "..., cm in k, is divisible by d if and", "param_rischDE(fa, fd, G, DE): \"\"\" Solve a Parametric Risch Differential", "+ [Pow(b, e*j) for b, e in iterms]))) dcoeff, dterms", "DE) # h = [h1, ..., hv] in k[t]^v and", "i, j: i.lcm(j), Gds) d = Poly(d, field=True) Q =", "\"\"\" Parametric logarithmic derivative heuristic. Given a derivation D on", "integration problem. Given a derivation D on k(t) and f,", "and b == 0. # Dy + b*y = Sum(ci*qi)", "v.is_zero: return None return (Q*N, Q*M, v) if p.degree(DE.t) >", "# roots of the resultant must be rational numbers. return", "x: derivation(x, DE, basic=True)/ derivation(A[i, j], DE, basic=True)) Rm1 =", "= qi.quo(d). # M is a matrix with m columns", "m)). Because M has entries in k(t), and because Matrix", "d.is_ground: break # a*Dp + b*p = Sum(ci*qi) may have", "Q] f, B = param_rischDE(ba, bd, Q0, DE) # f", "..., cm in Const(k) and q in k<t> of a*Dq", "= Sum(blk'*hk, (k, 1, v))/gamma, where k' = k +", "= ba.as_poly(gen).as_dict() denom_real = [value if key[0] % 4 ==", "elif case == 'tan': p = Poly(DE.t**2 + 1, DE.t)", "d if and only if ci = Sum(dj*aji) # (i", "..., m), when # A*Matrix([c1, ..., cm]) == 0 and", "fa, fd = fa*Poly(1/fd.LC(), DE.t), fd.monic() # interpretting limited integration", "def parametric_log_deriv(fa, fd, wa, wd, DE): # TODO: Write the", "## Reduce number of constants at this point V =", "([DE.extargs[i] for i in DE.indices('exp')] + [DE.T[i] for i in", "GG, h) such that A, B, h in k[t], GG", "= Sum(dj*Sum(aji*qi)) = Sum(dj*rj) # where rj = Sum(aji*qi) (j", "else -value if key[0] % 4 == 2 else 0", "they're not rational, like # log(2)/log(3). 
Also, there should be", "# (N*f - M*w) is not the logarithmic derivative of", "the constant field that is identically zero, but cannot #", "= H[i] + sitn Q[i] = Q[i] - derivation(sitn, DE)", "as the logarithmic derivative of a k(t) radical if there", "B = param_rischDE(ba, bd, Q0, DE) # f = [f1,", "i, j: r[j].nth(i)) else: M = Matrix(0, m, []) #", "v is a vector (Matrix) such that either v has", "p.238 (unnumbered) for j in range(ri): hji = fi[j]*DE.t**i hi[j]", "The corresponding solutions are # y = Sum(blk'*hk, (k, 1,", "- order_at(Gd, p, DE.t) for Ga, Gd in G]) n", "f in k(t) can be written as the logarithmic derivative", "n, and a, b, q1, ..., qm in k[t] with", "d1, ..., dr]) == 0 # Build combined constraint matrix", "the integral # of f respolys, residues = list(zip(*roots)) or", "======== is_log_deriv_k_t_radical, is_deriv_k \"\"\" fa, fd = fa.cancel(fd, include=True) #", "\\ r * Dt + \\ r * i /", "problems over 'k' (not k[t]) if DE.case == 'primitive': with", "k (resp. Dt/(t**2 + 1) in k, sqrt(-1) not in", "[h1, ..., hr] in k[t]^r and a matrix A with", "implemented\") # else: deg(a) > 0 # Iterate SPDE as", "q = b.quo_ground(a), [qi.quo_ground(a) for qi in q] if not", "try n=5. At least for prde_spde, it will always #", "most likely this indicates a bug. return None roots =", "for A, D in G] return (a, (ba, bd), G,", "with entries in k(t) such that for any solution c1,", "return (H, A) # else: b is in k, deg(qi)", "param_rischDE(Fa, Fd, G, DE) V = A.nullspace() V = [v", "prde_no_cancel_b_small(b, Q, n, DE): \"\"\" Parametric Poly Risch Differential Equation", "the minimum number of rows. Mqq = Matrix([qq]) # A", "cm, d1, ..., dr]]).T == 0. \"\"\" m = len(Q)", "n = reduce(ilcm, [i.as_numer_denom()[1] for i in u]) u *=", "== 'exp': p = Poly(DE.t, DE.t) elif case == 'tan':", "[[1, 1]]) residueterms = [(H[j][1].subs(z, i), i) for j in", "NotImplementedError V = [(-a*hn*ga).cancel(gd, include=True) for ga, gd in G]", "list(zip(*G)) gd = reduce(lambda i, j: i.lcm(j), Gds, Poly(1, DE.t))", "..., fm]) == 0, # in which case the sum", "for i in range(m): si = Poly(Q[i].nth(b.degree(DE.t))/b.LC(), DE.t) H[i] =", "m).row_join(B)) ## Eliminate d1, ..., du. W = A.nullspace() #", "= Poly(si*DE.t**N, DE.t) H[i] = H[i] + sitn Q[i] =", "A, u Au = A.row_join(u) Au = Au.rref(simplify=cancel, normalize_last=False)[0] #", "But maybe we can tell if they're not rational, like", "if # Sum(ci*qi) == 0 in which case the solutions", "1 else -value if key[0] % 4 == 3 else", "cases where we know that S1irr = Sirr, but there", "where we are given Df/f, not f, use is_deriv_k_in_field(). See", "D on k[t], an integer n, and a, b, q1,", "..., qm in k[t] with deg(a) > 0 and gcd(a,", "E_args. To handle the case where we are given Df,", "# lcm(ds, es1, ..., esm) a = hn*hs b -=", "integration problem: f = Dv + Sum(ci*wi, (i, 1, n))", "t_i is in L_K/C(x), implying in particular that E_K/C(x) and", "the same as if it were [[1, 1]]) residueterms =", "Differential Equation - No cancellation: deg(b) large enough. Given a", "W = A.nullspace() # W = [w1, ..., wt] where", "the logarithmic derivative of a k(t)-radical. b in k(t) can", "We treat this as 'no solution', until the structure #", "fi[j]*DE.t**i hi[j] = hji # building up Sum(djn*(D(fjn*t^n) - b*fjnt^n))", "is divisible by d if and only if (c1, ...,", "Yd.monic() return Y, C def parametric_log_deriv_heu(fa, fd, wa, wd, DE,", "# (a/d)*Dp + (b/d)*p = Sum(dj*rj) has a solution p", "respectively. 
def prde_normal_denom(fa, fd, G, DE):
    """
    Parametric Risch Differential Equation - Normal part of the denominator.

    Given a derivation D on k[t] and f, g1, ..., gm in k(t) with f weakly
    normalized with respect to t, return the tuple (a, b, G, h) such that
    a, h in k[t], b in k<t>, G = [g1, ..., gm] in k(t)^m, and for any solution
    c1, ..., cm in Const(k) and y in k(t) of
    Dy + f*y == Sum(ci*gi, (i, 1, m)), q == y*h in k<t> satisfies
    a*Dq + b*q == Sum(ci*Gi, (i, 1, m)).
    """
    ...
def real_imag(ba, bd, gen):
    """
    Helper function, to get the real and imaginary parts of a rational
    function evaluated at sqrt(-1) without actually evaluating it at sqrt(-1).

    Separates the even and odd power terms by checking the degree of terms wrt
    mod 4.  Returns a tuple (ba[0], ba[1], bd) where ba[0] is the real part of
    the numerator, ba[1] is the imaginary part, and bd is the denominator of
    the rational function.
    """
    bd = bd.as_poly(gen).as_dict()
    ba = ba.as_poly(gen).as_dict()
    denom_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2
        else 0 for key, value in bd.items()]
    denom_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3
        else 0 for key, value in bd.items()]
    bd_real = sum(r for r in denom_real)
    bd_imag = sum(r for r in denom_imag)
    num_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2
        else 0 for key, value in ba.items()]
    num_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3
        else 0 for key, value in ba.items()]
    ba_real = sum(r for r in num_real)
    ba_imag = sum(r for r in num_imag)
    ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen),
        (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen))
    bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen)
    return (ba[0], ba[1], bd)
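# Usage sketch for real_imag() as reconstructed above, on assumed data:
# split (t + 2)/t evaluated at t = sqrt(-1) into real and imaginary parts.
def _example_real_imag():
    from sympy import Poly, I, simplify, symbols

    t = symbols('t')
    num, den = Poly(t + 2, t), Poly(t, t)
    re_num, im_num, d = real_imag(num, den, t)
    # (2 + I)/I == 1 - 2*I, so re_num/d == 1 and im_num/d == -2.
    assert simplify((num.as_expr()/den.as_expr()).subs(t, I)
                    - (re_num.as_expr() + I*im_num.as_expr())/d.as_expr()) == 0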
def prde_special_denom(a, ba, bd, G, DE, case='auto'):
    """
    Parametric Risch Differential Equation - Special part of the denominator.

    case is one of {'exp', 'tan', 'primitive', 'auto'} for the
    hyperexponential, hypertangent, and primitive cases, respectively.  If
    case is 'auto', it will attempt to determine the type of the derivation
    automatically.

    For the hyperexponential (resp. hypertangent) case, given a derivation D
    on k[t] and a in k[t], b in k<t>, and g1, ..., gm in k<t> with Dt/t in k
    (resp. Dt/(t**2 + 1) in k, sqrt(-1) not in k), a != 0, and
    gcd(a, t) == 1 (resp. gcd(a, t**2 + 1) == 1), return the tuple
    (A, B, GG, h) such that A, B, h in k[t], GG = [gg1, ..., ggm] in k(t)^m,
    and for any solution c1, ..., cm in Const(k) and q in k<t> of
    a*Dq + b*q == Sum(ci*gi, (i, 1, m)), r == q*h in k[t] satisfies
    A*Dr + B*r == Sum(ci*ggi, (i, 1, m)).

    For case == 'primitive', k<t> == k[t], so it returns (a, b, G, 1) in this
    case.
    """
    ...
def prde_linear_constraints(a, b, G, DE):
    """
    Parametric Risch Differential Equation - Generate linear constraints on
    the constants.

    Given a derivation D on k[t], a, b in k[t], and G = [g1, ..., gm] in
    k(t)^m, return Q = [q1, ..., qm] in k[t]^m and a matrix M with entries in
    k(t) such that for any solution c1, ..., cm in Const(k) and p in k[t] of
    a*Dp + b*p == Sum(ci*gi, (i, 1, m)), (c1, ..., cm) is a solution of
    Mx == 0, and p and the ci satisfy a*Dp + b*p == Sum(ci*qi, (i, 1, m)).

    Because M has entries in k(t), and because Matrix doesn't play well with
    Poly, M will be a Matrix of Basic expressions.
    """
    ...


def poly_linear_constraints(p, d):
    """
    Given p = [p1, ..., pm] in k[t]^m and d in k[t], return
    q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k such that
    Sum(ci*pi, (i, 1, m)), for c1, ..., cm in k, is divisible by d if and
    only if (c1, ..., cm) is a solution of Mx == 0, in which case the
    quotient is Sum(ci*qi, (i, 1, m)).
    """
    ...
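# Illustrative sketch (assumed data) of the divisibility constraint built by
# poly_linear_constraints(): the remainders of the pi modulo d give the rows
# of M, and the quotients give the qi.
def _example_poly_linear_constraints():
    from sympy import Poly, Matrix, symbols

    t = symbols('t')
    d = Poly(t - 1, t)
    p = [Poly(t**2, t), Poly(t, t)]
    q, r = zip(*[pi.div(d) for pi in p])     # pi == qi*d + ri
    M = Matrix([[ri.nth(0) for ri in r]])    # remainders are constants here
    assert M == Matrix([[1, 1]])             # constraint: c1 + c2 == 0
    c = (1, -1)                              # a solution of M*c == 0
    combo = c[0]*p[0] + c[1]*p[1]            # t**2 - t, divisible by t - 1
    quo, rem = combo.div(d)
    assert rem.is_zero and quo == c[0]*q[0] + c[1]*q[1]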
def constant_system(A, u, DE):
    """
    Generate a system for the constant solutions.

    Given a differential field (K, D) with constant field C = Const(K), a
    Matrix A, and a vector (Matrix) u with coefficients in K, returns the
    tuple (B, v, s), where B is a matrix with coefficients in C and v is a
    vector (Matrix) such that either v has coefficients in C, in which case s
    is True and the solutions in C of Ax == u are exactly all the solutions
    of Bx == v, or v has a non-constant coefficient, in which case s is False
    and Ax == u has no constant solution.

    This algorithm is used both in solving parametric problems and in
    determining if an element a of K is a derivative of an element of K or
    the logarithmic derivative of a K-radical using the structure theorem
    approach.

    Because Poly does not play well with Matrix yet, this algorithm assumes
    that all matrix entries are Basic expressions.
    """
    ...
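# Illustrative sketch (assumed data) of the idea behind constant_system():
# row-reduce A|u over K = QQ(x); any remaining non-constant entry would be
# differentiated to produce extra constraints with constant coefficients.
def _example_constant_system():
    from sympy import Matrix, symbols, cancel, diff

    x = symbols('x')
    A = Matrix([[x, -x]])                     # entries in K = QQ(x)
    u = Matrix([0])
    Au, _ = A.row_join(u).rref(simplify=cancel)
    # rref gives [1, -1, 0]: already constant, so the constant solutions of
    # A*c == u are exactly the solutions of c1 - c2 == 0.
    assert Au == Matrix([[1, -1, 0]])
    assert all(diff(e, x) == 0 for e in Au)   # nothing further to eliminate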
def prde_spde(a, b, Q, n, DE):
    """
    Special Polynomial Differential Equation algorithm: Parametric Version.

    Given a derivation D on k[t], an integer n, and a, b, q1, ..., qm in k[t]
    with deg(a) > 0 and gcd(a, b) == 1, return (A, B, Qq, R, n1), with
    Qq = [q1, ..., qm] and R = [r1, ..., rm], such that for any solution
    c1, ..., cm in Const(k) and q in k[t] of degree at most n of
    a*Dq + b*q == Sum(ci*qi, (i, 1, m)),
    p = (q - Sum(ci*ri, (i, 1, m)))/a has degree at most n1 and satisfies
    A*Dp + B*p == Sum(ci*qi, (i, 1, m)).
    """
    ...
def prde_no_cancel_b_large(b, Q, n, DE):
    """
    Parametric Poly Risch Differential Equation - No cancellation: deg(b)
    large enough.

    Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
    b != 0 and either D == d/dt or deg(b) > max(0, deg(D) - 1), returns
    h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such
    that if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
    Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where
    d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
    """
    ...


def prde_no_cancel_b_small(b, Q, n, DE):
    """
    Parametric Poly Risch Differential Equation - No cancellation: deg(b)
    small enough.

    Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
    deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2, returns
    h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such
    that if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
    Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where
    d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
    """
    ...
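# Illustrative sketch (assumed data) of the no-cancellation degree descent
# for a single right-hand side: with deg(b) large, each coefficient of q is
# forced by matching the coefficient of t**(N + deg(b)) on both sides.
def _example_no_cancel_descent():
    from sympy import Poly, symbols

    t = symbols('t')
    b = Poly(t**2, t)                 # deg(b) > max(0, deg(D) - 1) for D = d/dt
    rhs = Poly(t**3 + t**2 + 1, t)    # solve Dq + b*q == rhs with deg(q) <= 1
    q = Poly(0, t)
    for N in range(1, -1, -1):
        s = Poly(rhs.nth(N + b.degree())/b.LC()*t**N, t)
        q += s
        rhs = rhs - s.diff(t) - b*s   # subtract D(s) + b*s
    assert rhs.is_zero and q.as_expr() == t + 1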
def prde_cancel_liouvillian(b, Q, n, DE):
    """
    Cancellation of the parametric equation in the Liouvillian cases
    (primitive and hyperexponential); see Bronstein, pp. 237-238.
    """
    ...


def param_poly_rischDE(a, b, q, n, DE):
    """Polynomial solutions of a parametric Risch differential equation.

    Given a derivation D on k[t], a, b in k[t] relatively prime, and
    q = [q1, ..., qm] in k[t]^m, return h = [h1, ..., hr] in k[t]^r and a
    matrix A with m + r columns and entries in Const(k) such that
    a*Dp + b*p = Sum(ci*qi, (i, 1, m)) has a solution p of degree <= n in
    k[t] with c1, ..., cm in Const(k) if and only if
    p = Sum(dj*hj, (j, 1, r)), where d1, ..., dr in Const(k) and
    A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
    """
    ...
def param_rischDE(fa, fd, G, DE):
    """
    Solve a Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi,
    (i, 1, m)).

    Given a derivation D on k(t), f in k(t), and G = [G1, ..., Gm] in k(t)^m,
    return h = [h1, ..., hr] in k(t)^r and a matrix A with m + r columns and
    entries in Const(k) such that Dy + f*y == Sum(ci*Gi, (i, 1, m)) has a
    solution y in k(t) with c1, ..., cm in Const(k) if and only if
    y = Sum(dj*hj, (j, 1, r)), where d1, ..., dr in Const(k) and
    A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.

    Elements of k(t) are tuples (a, d) with a and d in k[t].
    """
    ...
def limited_integrate_reduce(fa, fd, G, DE):
    """
    Simpler version of step 1 & 2 for the limited integration problem.

    Given a derivation D on k(t) and f, g1, ..., gm in k(t), return
    (a, b, h, N, g, V) such that a, b, h in k[t], N is a non-negative
    integer, g in k(t), V == [v1, ..., vm] in k(t)^m, and for any solution
    v in k(t), c1, ..., cm in C of f == Dv + Sum(ci*wi, (i, 1, m)),
    p = v*h is in k<t>, and p and the ci satisfy
    a*Dp + b*p == g + Sum(ci*vi, (i, 1, m)).  Furthermore, if
    S1irr == Sirr, then p is in k[t], and if t is nonlinear or Liouvillian
    over k, then deg(p) <= N.

    So that the special part is always computed, this function calls the more
    general prde_special_denom() automatically if it cannot determine that
    S1irr == Sirr.  Furthermore, it will automatically call bound_degree()
    when t is linear and non-Liouvillian, which for the transcendental case
    implies that Dt == a*t + b for some a, b in k*.
    """
    ...
def limited_integrate(fa, fd, G, DE):
    """
    Solves the limited integration problem:  f = Dv + Sum(ci*wi, (i, 1, n)).
    """
    ...
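# Illustrative sketch (assumed data) of the limited integration problem
# statement f == Dv + Sum(ci*wi): integrating f introduces only the allowed
# new logarithms, here w1 = Dx/x.
def _example_limited_integrate_statement():
    from sympy import symbols, diff, simplify

    x = symbols('x')
    f = 2*x + 3/x              # integrand f
    w1 = 1/x                   # allowed logarithmic derivative w1
    v, c1 = x**2, 3            # so Integral(f, x) == v + c1*log(x)
    assert simplify(f - (diff(v, x) + c1*w1)) == 0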
def parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None):
    """
    Parametric logarithmic derivative heuristic.

    Given a derivation D on k[t], f in k(t), and a hyperexponential monomial
    theta over k(t), raises either NotImplementedError, in which case the
    heuristic failed, or returns None, in which case it has proven that no
    solution exists, or returns a solution (n, m, v) of the equation
    n*f == Dv/v + m*Dtheta/theta, with v in k(t)* and n, m in ZZ with n != 0.

    If this heuristic fails, the structure theorem approach will need to be
    used.

    The argument w == Dtheta/theta.
    """
    ...


def parametric_log_deriv(fa, fd, wa, wd, DE):
    # TODO: Write the full algorithm using the structure theorems.
#    try:
    A = parametric_log_deriv_heu(fa, fd, wa, wd, DE)
#    except NotImplementedError:
#        Heuristic failed, we have to use the full method.
#        TODO: This could be implemented more efficiently.
#        It isn't too worrisome, because the heuristic handles most difficult
#        cases.
    return A
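# Illustrative sketch (assumed data) of the equation solved by
# parametric_log_deriv_heu():  n*f == Dv/v + m*Dtheta/theta.
def _example_parametric_log_deriv():
    from sympy import symbols, exp, diff, simplify

    x = symbols('x')
    theta = exp(x)             # hyperexponential monomial, Dtheta/theta == 1
    f = (x + 1)/(2*x)
    n, m, v = 2, 1, x          # candidate solution (n, m, v)
    assert simplify(n*f - (diff(v, x)/v + m*diff(theta, x)/theta)) == 0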
def is_deriv_k(fa, fd, DE):
    r"""
    Checks if Df/f is the derivative of an element of k(t).

    a in k(t) is the derivative of b in k(t) if there exists b in k(t) such
    that a = Db.  Either returns (ans, u), such that Df/f == Du, or None,
    which means that Df/f is not the derivative of an element of k(t).  ans
    is a list of tuples such that Add(*[i*j for i, j in ans]) == u.  This is
    useful for seeing exactly which elements of k(t) produce u.

    This function uses the structure theorem approach, which says that for
    any f in K, Df/f is the derivative of an element of K if and only if
    there are ri in QQ such that::

            ---               ---       Dt
            \    r  * Dt   +  \    r  *   i      Df
            /     i     i     /     i   ---   =  --.
            ---               ---        t        f
         i in L            i in E         i
              K/C(x)            K/C(x)

    Where L_K/C(x) = { i in {1, ..., n} such that t_i is transcendental over
    C(x)(t_1, ..., t_i-1) and Dt_i = Da_i, for some a_i in
    C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of logarithmic
    monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that
    t_i is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for
    some a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
    hyperexponential monomials of K over C(x)).  If K is an elementary
    extension over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is
    exactly the transcendence degree of K over C(x).  Furthermore, because
    Const_D(K) == Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x)
    and deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that
    E_K/C(x) and L_K/C(x) are disjoint.

    The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
    recursively using this same function.  Therefore, it is required to pass
    them as indices to D (or T).  L_args are the arguments of the logarithms
    indexed by L_K (i.e., if i is in L_K, then T[i] == log(L_args[i])).  This
    is needed to compute the final answer u such that Df/f == Du.

    log(f) will be the same as u up to an additive constant.  This is because
    they will both behave the same as monomials.  For example, both log(x)
    and log(2*x) == log(x) + log(2) satisfy Dt == 1/x, because log(2) is
    constant.  Therefore, the term const is returned.  const is such that
    log(const) + f == u.  This is calculated by dividing the arguments of one
    logarithm from the other.  Therefore, it is necessary to pass the
    arguments of the logarithmic terms in L_args.

    To handle the case where we are given Df/f, not f, use
    is_deriv_k_in_field().

    See also
    ========
    is_log_deriv_k_t_radical_in_field, is_log_deriv_k_t_radical

    """
    ...
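# Illustrative sketch (assumed data) of the linear system behind is_deriv_k():
# decide whether Df/f is a QQ-combination of the derivatives of the known
# logarithmic monomials.
def _example_structure_theorem_system():
    from sympy import symbols, log, diff, solve

    x, r1 = symbols('x r1')
    f = x**2
    t1 = log(x)                          # logarithmic monomial, Dt1 == 1/x
    # Solve r1*Dt1 == Df/f for r1 in QQ; here u == 2*log(x) == log(f).
    assert solve(r1*diff(t1, x) - diff(f, x)/f, r1) == [2]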
def is_log_deriv_k_t_radical(fa, fd, DE, Df=True):
    r"""
    Checks if Df is the logarithmic derivative of a k(t)-radical.

    b in k(t) can be written as the logarithmic derivative of a k(t) radical
    if there exist n in ZZ and u in k(t) with n, u != 0 such that
    n*b == Du/u.  Either returns (ans, u, n, const) or None, which means that
    Df cannot be written as the logarithmic derivative of a k(t)-radical.
    ans is a list of tuples such that Mul(*[i**j for i, j in ans]) == u.
    This is useful for seeing exactly what elements of k(t) produce u.

    This function uses the structure theorem approach, which says that for
    any f in K, Df is the logarithmic derivative of a K-radical if and only
    if there are ri in QQ such that::

            ---               ---       Dt
            \    r  * Dt   +  \    r  *   i
            /     i     i     /     i   ---   =  Df.
            ---               ---        t
         i in L            i in E         i
              K/C(x)            K/C(x)

    The sets L_K/C(x) and E_K/C(x) are as in is_deriv_k() and must, by their
    nature, be computed recursively using this same function.  E_args are the
    arguments of the exponential terms indexed by E_K (i.e., if i is in E_K,
    then T[i] == exp(E_args[i])).  This is needed to compute the final answer
    u such that n*f == Du/u.

    exp(f) will be the same as u up to a multiplicative constant.  This is
    because they will both behave the same as monomials.  For example, both
    exp(x) and exp(x + 1) == E*exp(x) satisfy Dt == t.  Therefore, the term
    const is returned.  const is such that exp(const)*f == u.  This is
    calculated by subtracting the arguments of one exponential from the
    other.  Therefore, it is necessary to pass the arguments of the
    exponential terms in E_args.

    To handle the case where we are given Df, not f, use
    is_log_deriv_k_t_radical_in_field().

    See also
    ========
    is_log_deriv_k_t_radical_in_field, is_deriv_k

    """
    ...
prde_linear_constraints(a, b, G, DE)
    Parametric Risch Differential Equation, generate linear constraints on the
    constants. Given a derivation D on k[t], a, b in k[t] with
    gcd(a, b) == 1, and G = [g1, ..., gm] in k(t)^m, return
    Q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k(t) such that
    for any solution c1, ..., cm in Const(k) and p in k[t] of
    a*Dp + b*p == Sum(ci*gi, (i, 1, m)), (c1, ..., cm) is a solution of
    Mx == 0, and p and the ci satisfy a*Dp + b*p == Sum(ci*qi, (i, 1, m)).
    Because M has entries in k(t), and because Matrix does not play well with
    Poly, M will be a Matrix of Basic expressions.

poly_linear_constraints(p, d)
    Given p = [p1, ..., pm] in k[t]^m and d in k[t], return
    q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k such that
    Sum(ci*pi, (i, 1, m)), for c1, ..., cm in k, is divisible by d if and
    only if (c1, ..., cm) is a solution of Mx == 0, in which case the
    quotient is Sum(ci*qi, (i, 1, m)).

constant_system(A, u, DE)
    Generate a system for the constant solutions. Given a differential field
    (K, D) with constant field C = Const(K), a Matrix A and a vector (Matrix)
    u with coefficients in K, returns the tuple (B, v, s), where B is a
    Matrix with coefficients in C and v is a vector (Matrix) such that either
    v has coefficients in C, in which case s is True and the solutions in C
    of Ax == u are exactly all the solutions of Bx == v, or v has a
    non-constant coefficient, in which case s is False and Ax == u has no
    constant solution. The reduction is done with
    Au.rref(simplify=cancel, normalize_last=False), so it will not return
    correct results if cancel() cannot reduce an identically zero expression
    to 0. Because Poly does not play well with nested expressions, the
    routine assumes that all of the entries of A and u are Basic expressions.
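A minimal sketch of the idea behind constant_system(), using public SymPy
only; the equation, the symbols c1, c2 and the use of solve() are illustrative
assumptions, not the routine's actual implementation (which works with rref()
over the field).

    from sympy import symbols, diff, solve

    x, c1, c2 = symbols('x c1 c2')

    # One equation over K = QQ(x) in the constant unknowns c1, c2:
    #     x*c1 + c2 == x + 1.
    # Since Dc == 0 for constants, adjoining the derivative of each row adds
    # constraints that force constant solutions; here D(row) gives c1 - 1 == 0.
    eqs = [x*c1 + c2 - (x + 1)]
    eqs = eqs + [diff(e, x) for e in eqs]

    print(solve(eqs, [c1, c2]))   # -> {c1: 1, c2: 1}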
prde_spde(a, b, Q, n, DE)
    Special Polynomial Differential Equation algorithm, parametric version.
    Given a derivation D on k[t], an integer n, and a, b, q1, ..., qm in k[t]
    with deg(a) > 0 and gcd(a, b) == 1, return (A, B, Qq, R, n1), with
    Qq = [q1, ..., qm] and R = [r1, ..., rm], such that for any solution
    c1, ..., cm in Const(k) and q in k[t] of degree at most n of
    a*Dq + b*q == Sum(ci*gi, (i, 1, m)),
    p = (q - Sum(ci*ri, (i, 1, m)))/a has degree at most n1 and satisfies
    A*Dp + B*p == Sum(ci*qi, (i, 1, m)).

prde_no_cancel_b_large(b, Q, n, DE)
    Parametric Poly Risch Differential Equation, no cancellation, deg(b)
    large enough. Given a derivation D on k[t], n in ZZ, and
    b, q1, ..., qm in k[t] with b != 0 and either D == d/dt or
    deg(b) > max(0, deg(D) - 1), returns h1, ..., hr in k[t] and a matrix A
    with coefficients in Const(k) such that if c1, ..., cm in Const(k) and q
    in k[t] satisfy deg(q) <= n and Dq + b*q == Sum(ci*qi, (i, 1, m)), then
    q = Sum(dj*hj, (j, 1, r)), where d1, ..., dr are in Const(k) and
    A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.

prde_no_cancel_b_small(b, Q, n, DE)
    The companion no-cancellation routine for deg(b) small enough:
    deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2. It returns data
    of the same shape, h1, ..., hr in k[t] and a constraint matrix A, such
    that every solution q of degree at most n of
    Dq + b*q == Sum(ci*qi, (i, 1, m)) is q = Sum(dj*hj, (j, 1, r)) with
    A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.

prde_cancel_liouvillian(b, Q, n, DE)
    The cancellation case over a Liouvillian (primitive or hyperexponential)
    monomial; see pg. 237 of Bronstein's book. The non-linear and
    hypertangent cancellation cases have not yet been implemented and raise
    NotImplementedError.
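A hand-checked instance of the no-cancellation case, verified with public
SymPy only; the particular b, q1 and the basis polynomial h1 = x - 1 are
illustrative assumptions rather than output of prde_no_cancel_b_large().

    from sympy import symbols, diff

    x, c1, d1 = symbols('x c1 d1')

    # Dq + b*q == c1*q1 with b = 1 and q1 = x, over k[t] = QQ[x], D = d/dx.
    # deg(b) = 0 >= max(0, deg(D) - 1), so the "deg(b) large enough" branch
    # applies; every polynomial solution is q = d1*h1 with h1 = x - 1,
    # subject to the single constraint d1 - c1 == 0.
    b, q1 = 1, x
    h1 = x - 1
    q = d1*h1

    residual = (diff(q, x) + b*q - c1*q1).expand()
    assert residual == ((d1 - c1)*x).expand()   # vanishes iff d1 == c1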
param_poly_rischDE(a, b, q, n, DE)
    Polynomial solutions of a parametric Risch differential equation. Given a
    derivation D in k(t), a, b in k[t] relatively prime, and
    q = [q1, ..., qm] in k[t]^m, return h = [h1, ..., hr] in k[t]^r and a
    matrix A with m + r columns and entries in Const(k) such that
    a*Dp + b*p == Sum(ci*qi, (i, 1, m)) has a solution p of degree <= n in
    k[t] with c1, ..., cm in Const(k) if and only if
    p = Sum(dj*hj, (j, 1, r)), where d1, ..., dr are in Const(k) and
    (c1, ..., cm, d1, ..., dr) is a solution of Ax == 0. When deg(a) > 0 the
    routine iterates prde_spde as long as possible, cumulating coefficients
    and terms for the recovery of the original solutions; the degree bound is
    taken from bound_degree(a, b, r, DE, parametric=True), with a temporary
    fallback of n = 5 when that bound is not implemented for the given case.

param_rischDE(fa, fd, G, DE)
    Solve a Parametric Risch Differential Equation,
    Dy + f*y == Sum(ci*Gi, (i, 1, m)). Given a derivation D in k(t), f in
    k(t), and G = [G1, ..., Gm] in k(t)^m, return h = [h1, ..., hr] in
    k(t)^r and a matrix A with m + r columns and entries in Const(k) such
    that Dy + f*y == Sum(ci*Gi, (i, 1, m)) has a solution y in k(t) with
    c1, ..., cm in Const(k) if and only if y = Sum(dj*hj, (j, 1, r)), where
    d1, ..., dr are in Const(k) and (c1, ..., cm, d1, ..., dr) is a solution
    of Ax == 0. Internally, f is weakly normalized, the normal and special
    parts of the denominator are removed with prde_normal_denom() and
    prde_special_denom(), the resulting polynomial problem is solved with
    param_poly_rischDE(), and the generators hk of the polynomial solutions
    are divided back by the accumulated factor gamma to recover solutions of
    the original equation.
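Another hand-checked instance, this time in a hyperexponential extension,
again using public SymPy only; f, G1 and the solution family are illustrative
assumptions.

    from sympy import symbols, exp, diff, simplify

    x, c1 = symbols('x c1')

    # Dy + f*y == c1*G1 in the extension t = exp(x) (so Dt = t), with f = 1
    # and G1 = t.  The solution family is y = (c1/2)*t.
    f = 1
    G1 = exp(x)
    y = c1*exp(x)/2

    assert simplify(diff(y, x) + f*y - c1*G1) == 0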
limited_integrate_reduce(fa, fd, G, DE)
    Simpler version of step 1 & 2 for the limited integration problem. Given
    a derivation D on k(t) and f, w1, ..., wn in k(t), return
    (a, b, h, N, g, V) such that a, b, h in k[t], N is a non-negative
    integer, g in k(t), V == [v1, ..., vm] in k(t)^m, and for any solution v
    in k(t), c1, ..., cm in C of f == Dv + Sum(ci*wi, (i, 1, m)), p = v*h is
    in k<t>, and p and the ci satisfy
    a*Dp + b*p == g + Sum(ci*vi, (i, 1, m)). Furthermore, if S1irr == Sirr,
    then p is in k[t], and if t is nonlinear or Liouvillian over k, then
    deg(p) <= N. So that the special part is always computed, this function
    calls the more general prde_special_denom() automatically if it cannot
    determine that S1irr == Sirr.

limited_integrate(fa, fd, G, DE)
    Solves the limited integration problem f == Dv + Sum(ci*wi, (i, 1, n)):
    given f and w1, ..., wn in k(t), decide whether there exist v in k(t) and
    constants c1, ..., cn such that the equation holds, and compute them. The
    problem is recast as a parametric Risch DE problem (with
    Fa = Poly(0, DE.t), Fd = Poly(1, DE.t) and (fa, fd) prepended to G) and
    handed to param_rischDE(); a particular solution is then read off from
    the nullspace of the returned constraint matrix.
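A toy instance of the limited integration problem over QQ(x), checked with
public SymPy; the particular f, w1, v and c1 are illustrative assumptions.

    from sympy import symbols, diff, simplify

    x = symbols('x')

    # f == Dv + c1*w1 with f = 2*x + 3/x and w1 = 1/x:
    # v = x**2 in QQ(x) and the constant c1 = 3 solve the problem.
    f = 2*x + 3/x
    w1 = 1/x
    v, c1 = x**2, 3

    assert simplify(f - (diff(v, x) + c1*w1)) == 0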
parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None)
    Parametric logarithmic derivative heuristic. Given a derivation D on
    k[t], f in k(t), and a hyperexponential monomial theta over k(t), raises
    either NotImplementedError, in which case the heuristic failed, or
    returns None, in which case it has proven that no solution exists, or
    returns a solution (n, m, v) of the equation
    n*f == Dv/v + m*Dtheta/theta, with v in k(t)* and n, m in ZZ with
    n != 0. If this heuristic fails, the structure theorem approach will
    need to be used. The argument w == Dtheta/theta.

parametric_log_deriv(fa, fd, wa, wd, DE)
    Front end for the parametric logarithmic derivative problem. Writing the
    full algorithm using the structure theorems is still a TODO; for now the
    heuristic above is called directly, which is not too worrisome because
    the heuristic handles most difficult cases.
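A hand-checked solution of the parametric logarithmic derivative equation,
using public SymPy only; theta, f and the triple (n, m, v) are illustrative
assumptions.

    from sympy import symbols, exp, diff, simplify

    x = symbols('x')

    # n*f == Dv/v + m*Dtheta/theta with theta = exp(x), so Dtheta/theta == 1.
    # For f = 1/x + 1 the solution (n, m, v) = (1, 1, x) works.
    theta = exp(x)
    f = 1/x + 1
    n, m, v = 1, 1, x

    assert simplify(n*f - (diff(v, x)/v + m*diff(theta, x)/theta)) == 0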
\"\"\" if not A: return A,", "m + r columns and entries in Const(k) = Const(k0)", "return h = [h1, ..., hr] in k(t)^r and a", "d1, ..., dr) is a solution of Ax == 0.", "approach, which says that for any f in K, Df", "else: # Liouvillian cases if DE.case == 'primitive' or DE.case", "M) def poly_linear_constraints(p, d): \"\"\" Given p = [p1, ...,", "if i == n: M = Ai else: M =", "du, e1, ..., ev]) == 0. # The solutions of", "Asj = A[s, j] A.row_op(s, lambda r, jj: cancel(r -", "Basic expressions. \"\"\" if not A: return A, u Au", "a column matrix with # entries aj1, ..., ajm in", "return q, M def constant_system(A, u, DE): \"\"\" Generate a", "term const is returned. const is such that exp(const)*f ==", "1: n = min(n, m) elif case == 'tan': dcoeff", "limited_integrate_reduce(fa, fd, G, DE): \"\"\" Simpler version of step 1", "x). # But this is a limitation in computer algebra", "# Coefficients of t^j (j > 0) in Sum(ci*qi) must", "or b.degree() > max(0, DE.d.degree() - 1)): return prde_no_cancel_b_large(b, q,", "and Dt_i = Da_i/a_i, for some a_i in C(x)(t_1, ...,", "them. if DE.case in ['base', 'primitive', 'exp', 'tan']: hs =", "Df: dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2, include=True)", "p is in k[t], and if t is nonlinear or", "== t. Therefore, the term const is returned. const is", "M = Matrix(0, m, []) # No constraints. return q,", "m = len(p) q, r = zip(*[pi.div(d) for pi in", "i == 'primitive']) - set(DE.indices('log'))): raise NotImplementedError(\"Real version of the", "i.degree() and all(k.is_Rational for k in j) for i, j", "S(1)) u = Mul(*[Pow(i, j*n) for i, j in residueterms])", "zeros(M.rows, 1), DE) # M is a matrix with m", "Yd = Poly(y_num, DE.t), Poly(y_den, DE.t) Y = Ya*Poly(1/Yd.LC(), DE.t),", "a*Dp + b*p = Sum(dj*rj) has a solution p of", "of an element of K or the logarithmic derivative of", "m = len(v) - r - 1 C = list(v[1:", "Const(K) such that Dy + f*y == Sum(ci*gi, (i, 1,", "= [g1, ..., gv] in k[t]^v and and B is", "is a list of tuples of factions of the terms", "problems, # because case != 'base'. betaa, alphaa, alphad =", "(the structure theorems should be able to completely decide these", "calculated by subtracting the arguments of one exponential from the", "danger is that we might # incorrectly prove that an", "nfmwa = N.as_poly(DE.t)*fa*wd - M.as_poly(DE.t)*wa*fd nfmwd = fd*wd Qv =", "[DE.extargs[i] for i in DE.indices('log')]) ans = list(zip(terms, u)) result", "% 4 == 2 else 0 for key, value in", "# Sum(dj*fj, (j, 1, u)) + alpha*Sum(ek*gk, (k, 1, v)).", "qi. if all([qi.is_zero for qi in q]): return [], zeros(1,", "case in ['primitive', 'base']: B = ba.quo(bd) return (a, B,", "i), ...], where i*log(a) is a term in the log-part", "Q[i] = Q[i] - derivation(si, DE) - b*si if all(qi.is_zero", "(n, u) elif case == 'tan': raise NotImplementedError(\"The hypertangent case", "B, h in k[t], GG = [gg1, ..., ggm] in", "when t is linear and non-Liouvillian, which for the transcendental", "Poly Risch Differential Equation - No cancellation: deg(b) small enough.", "(j, 1, r)), where d1, ..., dr in Const(k) and", "produce u. This function uses the structure theorem approach, which", "+ b*q == Sum(ci*qi, (i, 1, m)), then q =", "a and d in k[t]. \"\"\" m = len(G) q,", "by cancel(). 
Therefore, a careful user can avoid this #", "None and B is not None: Q, s, z =", "the case where we are given Df/f, not f, use", "each wl is a column matrix with # entries blk", "Da_i/a_i, for some a_i in C(x)(t_1, ..., t_i-1)* } (i.e.,", "A is not None and B is not None: Q,", "terms wrt mod 4. Returns a tuple (ba[0], ba[1], bd)", "This is useful for seeing exactly what elements of k(t)", "== const(K) == F Ri = A[i, :] # Rm+1;", "cancel(fa.as_expr()/fd.as_expr() - Add(*[Mul(i, j/n) for i, j in zip(argterms, u)]))", ":] - A[s, i]*A[:, m+1] Asj = A[s, j] A.row_op(s,", "or fa.degree() >= fd.degree(): # f is the logarithmic derivative", "= max([ri.degree() for ri in r]) M = Matrix(n +", "Checks if Df is the logarithmic derivative of a k(t)-radical.", "a, b, h in k[t], N is a non-negative integer,", "etad, DE) if A is not None and B is", "it will be removed. # the currently added test case", "# (i = 1, ..., m) for some d1, ...,", "k such that Sum(ci*pi, (i, 1, m)), for c1, ...,", "dc = max([qi.degree(DE.t) for qi in Q]) M = Matrix(dc", "a basis except possibly when Dy + f*y == 0", "= Q + Fi return (H, M) def param_poly_rischDE(a, b,", "eqs = [p.nth(i) - c1*q.nth(i) for i in range(B +", "= i.as_numer_denom() icoeff, iterms = sqf_list(i) l.append(Mul(*([Pow(icoeff, j)] + [Pow(b,", "of an element of k(t). ans is a list of", "+ [DE.extargs[i] for i in DE.indices('log')]) ans = list(zip(terms, u))", "[splitfactor(gd, DE) for _, gd in G] En, Es =", "# The vectors (bl1, ..., blm) generate the space of", "dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)), fd*fa dfa, dfd", "in residueterms], S(1)) u = Mul(*[Pow(i, j*n) for i, j", "line answers that: # Assuming that we can solve such", "in k, deg(qi) < deg(Dt) t = DE.t if DE.case", "alpha *= a # Solutions p of a*Dp + b*p", "not supported \" \"in the structure theorems.\") E_part = [DE.D[i].quo(Poly(DE.T[i],", "equation Dz + f*z = q*Sum(ci*Gi) # correspond to solutions", "transcendence degree of K over C(x). Furthermore, because Const_D(K) ==", "Matrix from sympy.solvers import solve def prde_normal_denom(fa, fd, G, DE):", "Df/f == Du, or None, which means that Df/f is", "non-negative integer, g in k(t), V == [v1, ..., vm]", "q, (fa, fd) = weak_normalizer(fa, fd, DE) # Solutions of", "bd, Q0, DE) # f = [f1, ..., fr] in", "at sqrt(-1) Separates the even and odd power terms by", "sitn = Poly(si*DE.t**N, DE.t) H[i] = H[i] + sitn Q[i]", "A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c)) return (H, A) # else: b", "cm in Const(k) and q in k[t] satisfy deg(q) <=", "Mul(*[i**j for i, j in ans]) == u. 
This is", "h in k[t], GG = [gg1, ..., ggm] in k(t)^m,", "# Note: this might be empty, but everything below should", "for i, j in zip(argterms, u)])) return (ans, result, n,", "if not all(len(j) == i.degree() and all(k.is_Rational for k in", "else: if not all(i.is_Rational for i in u): # TODO:", "import (order_at, order_at_oo, weak_normalizer, bound_degree) from sympy.integrals.risch import (gcdex_diophantine, frac_in,", "k[t] relatively prime, and q = [q1, ..., qm] in", "None if case == 'auto': case = DE.case if case", "= bound_degree(a, b, r, DE, parametric=True) except NotImplementedError: # A", "Q[i].nth(N + db)/b.LC() sitn = Poly(si*DE.t**N, DE.t) H[i] = H[i]", "i, j: Q[j][1].nth(i)) else: M = Matrix(0, m, []) #", "max([qi.degree(DE.t) for qi in Q]) M = Matrix(dc + 1,", "L_K (i.e., if i is in L_K, then T[i] ==", "i in u): # TODO: But maybe we can tell", "c1, ..., cm in Const(k) and q in k[t] of", "and the ci satisfy a*Dp + b*p == Sum(ci*qi, (i,", "_, ri in Q]): N = max([ri.degree(DE.t) for _, ri", "returns None, in which case it has proven that no", "cm) is a solution of Mx = 0, in which", "= [(Mbeta*vj)[0] for vj in V] # [f1, ..., fu]", "Sum(aji*qqi). # Sum(ci*qi) is divisible by d if and only", "added test case takes large time # even with n=5,", "case is 'auto', it will attempt to determine the type", "reduction from ratint() if not fd.is_sqf or fa.degree() >= fd.degree():", "that it finds the solution in the given field not", "ba_imag*bd_imag).as_poly(gen), (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen)) bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen) return", "DE.t) if recognize_log_derivative(2*betaa, betad, DE): A = parametric_log_deriv(alphaa, alphad, etaa,", "i*log(a) is a term in the log-part of the integral", "blk (k = 1, ..., m + u + v)", "corresponding solutions are # y = Sum(blk'*hk, (k, 1, v))/gamma,", "M.nullspace() # V = [v1, ..., vu] where each vj", "of a element of K if and only if there", "Q0, DE) # f = [f1, ..., fr] in k^r", "in rde.py if case == 'auto': case = DE.case if", "ru] # Solutions of (a/d)*Dp + (b/d)*p = Sum(dj*rj) correspond", "for r in denom_imag) num_real = [value if key[0] %", "such that Add(*[i*j for i, j in ans]) == u.", "equation (i.e., qi in k[t]). See the docstring of each", "f*y == Sum(ci*Gi) exists. They generate # the space and", "solutions q = p/hs of the previous equation. gamma *=", "{ i in {1, ..., n} such that t_i is", "sum(r for r in denom_imag) num_real = [value if key[0]", "Sum(ci*qi, (i, 1, m)). \"\"\" m = len(p) q, r", "= Matrix([qq]) # A single row. r = [(Mqq*vj)[0] for", "const is returned. const is such that log(const) + f", "Const(k). # Sum(aji*gi) is in k[t] and equal to Sum(aji*qi)", "A = Matrix(0, m, []) # Solutions of the original", "ggm] in k(t)^m, and for any solution c1, ..., cm", "given f, g1, ..., gm in K(t), to determine if", "e in iterms]))) dcoeff, dterms = sqf_list(d) ld.append(Mul(*([Pow(dcoeff, j)] +", "fa.cancel(fd, include=True) # f must be simple n, s =", "and a matrix M with entries in k such that", "b*p = Sum(dj*rj) has a solution p of degree <=", "Build combined constraint matrix with m + r + m", "for ci = Sum(dj*aji) # (i = 1, ..., m)", "m*Dtheta/theta, with v in k(t)* and n, m in ZZ", "the structure # theorem version of parametric_log_deriv is implemented. return", "if b.degree(DE.t) > 0: for i in range(m): si =", "in this case. 
This should never happen for the #", "qi in Q]) if d > 0: M = Matrix(d,", "B = param_poly_rischDE(a.quo(d), b.quo(d), r, n, DE) # g =", "range(A.cols): for i in range(A.rows): if A[i, j].has(*DE.T): # This", "with large n's. n = 5 h, B = param_poly_rischDE(a,", "any solution c1, ..., cm in Const(k) and y in", "A if a.is_ground: # Normalization: a = 1. a =", "# entries aj1, ..., ajm in Const(k). # Sum(aji*gi) is", "# TODO: This could be implemented more efficiently. # It", "is that each monomial is recursively transcendental if len(DE.exts) !=", "computable # via the cancel() function, in order to prevent", "= hn.degree(DE.t) + hs.degree(DE.t) + max(0, 1 - DE.d.degree(DE.t) -", "return A, but this has # the minimum number of", "logarithm from the other. Therefore, it is necessary to pass", "Au[:, :-1], Au[:, -1] for j in range(A.cols): for i", "given Df, not f, use is_log_deriv_k_t_radical_in_field(). See also ======== is_log_deriv_k_t_radical_in_field,", "fr] in k^r and B is a matrix with #", "f respolys, residues = list(zip(*roots)) or [[], []] # Note:", "# log(2)/log(3). Also, there should be an option to continue", "N = M.nullspace() # N = [n1, ..., ns] where", "in ans]) == u. This is useful for seeing exactly", "coefficients in C and v is a vector (Matrix) such", "k<t> of a*Dq + b*q = Sum(ci*Gi) correspond # to", "normalized equation Dz + f*z = q*Sum(ci*Gi) # correspond to", "and write tests c1 = c1 or Dummy('c1') p, a", "exactly which elements of k(t) produce u. This function uses", "-value if key[0] % 4 == 2 else 0 for", "be zero. d = max([qi.degree(DE.t) for qi in Q]) if", "1, r)) where # d1, ..., dr ar in Const(k)", "T[i] == exp(E_args[i])). This is needed to compute the final", "monomial theta over k(t), raises either NotImplementedError, in which case", "in Const(k) = Const(k0) # such that Dy0 + b*y0", "derivation D on k(t) and f, g1, ..., gn in", "log-part of the integral # of f respolys, residues =", "..., gm] in k(t)^m, and for any solution c1, ...,", "behave the same as monomials. For example, both log(x) and", "d1*f1 for f1 = 1 and any d1 in Const(k)", "which a solution of # the equation Dy + f*y", "DE.d.degree(DE.t)): return None if case == 'auto': case = DE.case", "for key, value in ba.items()] num_imag = [value if key[0]", "in f] else: # Base case. Dy == 0 for", "all indices of logarithmic monomials of K over C(x)), and", "Q, n, DE): \"\"\" Special Polynomial Differential Equation algorithm: Parametric", "..., hv] in k[t]^v and and B is a matrix", "of {'primitive', 'exp', 'tan', \" \"'base', 'auto'}, not %s\" %", "deg(a) > 0 # Iterate SPDE as long as possible", "bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen) return (ba[0], ba[1], bd) def", "a solution of Ax == 0. Elements of k(t) are", "u.row_op(s, lambda r, jj: cancel(r - Asj*um1)) A = A.col_join(Rm1)", "-1 M = Matrix() else: dc = max([qi.degree(DE.t) for qi", "weak_normalizer(fa, fd, DE) # Solutions of the weakly normalized equation", "the same as monomials. For example, both log(x) and log(2*x)", "k[t]^r and a matrix A with m + r columns", "g in k(t), V == [v1, ..., vm] in k(t)^m,", "because case != 'base'. betaa, alphaa, alphad = real_imag(ba, bd*a,", "wa.div(wd) B = max(0, derivation(DE.t, DE).degree(DE.t) - 1) C =", "C def limited_integrate_reduce(fa, fd, G, DE): \"\"\" Simpler version of", "DecrementLevel? 
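A quick public-SymPy illustration of the additive constant discussed above
(log(2*x) versus log(x)); the use of expand_log here is just one convenient
way to verify it and is not how the routine itself computes const.

    from sympy import symbols, log, diff, expand_log

    x = symbols('x')

    # log(x) and log(2*x) == log(x) + log(2) satisfy the same derivation
    # Dt == 1/x; they differ only by the additive constant log(2), which is
    # the role of the `const` term described above.
    f, u = log(2*x), log(x)

    assert diff(f, x) - diff(u, x) == 0
    assert expand_log(f - u - log(2)) == 0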
is_log_deriv_k_t_radical(fa, fd, DE, Df=True)
    Checks if Df is the logarithmic derivative of a k(t)-radical.

    b in k(t) can be written as the logarithmic derivative of a k(t) radical
    if there exist n in ZZ and u in k(t) with n, u != 0 such that
    n*b == Du/u. Either returns (ans, u, n, const) or None, which means that
    Df cannot be written as the logarithmic derivative of a k(t)-radical.
    ans is a list of tuples such that Mul(*[i**j for i, j in ans]) == u.
    This is useful for seeing exactly what elements of k(t) produce u.

    This function uses the structure theorem approach, which says that for
    any f in K, Df is the logarithmic derivative of a K-radical if and only
    if there are ri in QQ such that

        Sum(ri*Dt_i, i in L_K/C(x)) + Sum(ri*Dt_i/t_i, i in E_K/C(x)) == Df,

    with C, L_K/C(x) and E_K/C(x) as in is_deriv_k() above. E_args are the
    arguments of the hyperexponentials indexed by E_K (i.e., if i is in E_K,
    then T[i] == exp(E_args[i])); these are needed to compute the final
    answer u such that n*f == Du/u.

    exp(f) will be the same as u up to a multiplicative constant. This is
    because they will both behave the same as monomials: for example, both
    exp(x) and exp(x + 1) == E*exp(x) satisfy Dt == t. Therefore, the term
    const is returned. const is such that exp(const)*f == u, and it is
    calculated by subtracting the arguments of one exponential from the
    other, which is why the arguments of the exponential terms must be
    passed in E_args.

    The real version of the structure theorems with hypertangent support is
    not yet implemented, and non-rational coefficients ri raise
    NotImplementedError. To handle the case where we are given Df, not f,
    use is_log_deriv_k_t_radical_in_field(). See also
    is_log_deriv_k_t_radical_in_field and is_deriv_k.
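Two public-SymPy checks of the notions above: the radical condition
n*b == Du/u, and the multiplicative constant between exp(x) and exp(x + 1);
the particular b, u and n are illustrative assumptions.

    from sympy import symbols, sqrt, exp, diff, simplify

    x = symbols('x')

    # b = 1/(2*x) is the logarithmic derivative of the QQ(x)-radical sqrt(x):
    # with u = x in QQ(x) and n = 2 we have n*b == Du/u.
    b = 1/(2*x)
    u, n = x, 2
    assert simplify(n*b - diff(u, x)/u) == 0
    assert simplify(b - diff(sqrt(x), x)/sqrt(x)) == 0

    # exp(x) and exp(x + 1) == E*exp(x) satisfy the same derivation Dt == t;
    # they differ by the multiplicative constant E, the role of `const` above.
    assert simplify(exp(x + 1)/exp(x) - exp(1)) == 0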
r = [(Mq*vj)[0] for vj", "both exp(x) and exp(x + 1) == E*exp(x) satisfy Dt", "eye(m) # Could return A, but this has # the", "A.row_join(u) Au = Au.rref(simplify=cancel, normalize_last=False)[0] # Warning: This will NOT", "n1 = n - a.degree(DE.t) return (A, B, Qq, R,", "dfa, dfd = fa, fd # Our assumption here is", "M) def param_poly_rischDE(a, b, q, n, DE): \"\"\"Polynomial solutions of", "k(t), V == [v1, ..., vm] in k(t)^m, and for", "= DE.t if DE.case != 'base': with DecrementLevel(DE): t0 =", "p = v*h is in k<t>, and p and the", "r = len(h) m = len(v) - r - 1", "= Matrix(dc + 1, m, lambda i, j: Q[j].nth(i)) A,", "pa, pd = frac_in(p, DE.t, cancel=True) wa, wd = frac_in((wa,", "en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t))) a = dn*h c = a*h ba = a*fa", "+ 1, m, lambda i, j: Q[j][1].nth(i)) else: M =", "at most n1 and satisfies A*Dp + B*p == Sum(ci*qi,", "..., qm] and R = [r1, ..., rm], such that", "[]) # Solutions of the original equation are # y", "Sum(ci*qi, (i, 1, m)) has # a solution y0 in", "Matrix(0, m, []) # No constraints. return q, M def", "is one of {'primitive', 'exp', 'tan', 'auto'} for the primitive,", "be the same as u up to a multiplicative constant.", "this same function. Therefore, it is required to pass them", "as u up to a multiplicative constant. This is because", "order_at(bd, p, DE.t) nc = min([order_at(Ga, p, DE.t) - order_at(Gd,", "# from eq. on top of p.238 (unnumbered) for j", "key[0] % 4 == 3 else 0 for key, value", "fd, DE in that it finds the solution in the", "= [r1, ..., rm], such that for any solution c1,", "Matrix of Basic expressions. \"\"\" m = len(G) Gns, Gds", "This is useful for seeing exactly which elements of k(t)", "terms = ([DE.extargs[i] for i in DE.indices('exp')] + [DE.T[i] for", "in k[t], and if t is nonlinear or Liouvillian over", "in L_K/C(x), implying in particular that E_K/C(x) and L_K/C(x) are", "a*Dq + b*q == Sum(ci*gi, (i, 1, m)), r ==", "the constant solutions. Given a differential field (K, D) with", "Q.is_zero or v.is_zero: return None return (Q*N, Q*M, v) if", "is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE) if Qv is None: # (N*f -", "nonelementary (such as # risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x).", "are elements of k, is # divisible by d if", "the solutions are # y = d1*f1 for f1 =", "f in k(t), and a hyperexponential monomial theta over k(t),", "[q1, ..., qm] in k[t]^m, return h = [h1, ...,", "get the real and imaginary part of a rational function", "q, r, n = prde_spde(a, b, q, n, DE) beta", "= [p.nth(i) - c1*q.nth(i) for i in range(B + 1,", "all([ri.is_zero for ri in r]): n = max([ri.degree() for ri", "is the imaginary part and bd is the denominator of", "of the rational function. \"\"\" bd = bd.as_poly(gen).as_dict() ba =", "denom_real) bd_imag = sum(r for r in denom_imag) num_real =", "DE, basic=True)/ derivation(A[i, j], DE, basic=True)) Rm1 = Rm1.applyfunc(cancel) um1", "tn) == const(K) == F Ri = A[i, :] #", "no solution for c. return None M, N = s[c1].as_numer_denom()", "for key, value in bd.items()] bd_real = sum(r for r", "None u1, r1 = (fa*l.quo(fd)).div(z) # (l*f).div(z) u2, r2 =", "in V] # [r1, ..., ru] # Solutions of (a/d)*Dp", "# and terms for the recovery of original solutions. 
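The problem statement is easy to check on a concrete instance. The sketch below is only an illustration in plain SymPy; the particular f, g1, g2, y, c1, c2 are chosen by hand and are not taken from the module.

    from sympy import symbols, diff, simplify

    x = symbols('x')

    # One instance of the parametric problem Dy + f*y == c1*g1 + c2*g2
    # over k = QQ(x) with D = d/dx.
    f, g1, g2 = 1/x, 1, x
    y, c1, c2 = x + x**2, 2, 3
    assert simplify(diff(y, x) + f*y - (c1*g1 + c2*g2)) == 0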
The first group of functions normalizes the equation and reduces it to linear algebra over the constants.

prde_normal_denom(fa, fd, G, DE) computes the normal part of the denominator. Given a derivation D on k[t] and f, g1, ..., gm in k(t) with f weakly normalized with respect to t, it returns (a, b, G, h) with a, h in k[t], b in k<t> and G = [g1, ..., gm] in k(t)^m such that, for any solution c1, ..., cm in Const(k) and y in k(t) of Dy + f*y == Sum(ci*gi, (i, 1, m)), q == y*h in k<t> satisfies a*Dq + b*q == Sum(ci*Gi, (i, 1, m)).

prde_special_denom(a, ba, bd, G, DE, case='auto') computes the special part of the denominator. Here case is one of {'exp', 'tan', 'primitive'} for the hyperexponential, hypertangent and primitive cases. For the hyperexponential (resp. hypertangent) case, given a derivation D on k[t], a in k[t], b in k<t> and g1, ..., gm in k(t) with Dt/t in k (resp. Dt/(t**2 + 1) in k and sqrt(-1) not in k), a != 0 and gcd(a, t) == 1 (resp. gcd(a, t**2 + 1) == 1), it returns (A, B, GG, h) with A, B, h in k[t] and GG = [gg1, ..., ggm] in k(t)^m such that, for any solution c1, ..., cm in Const(k) and q in k<t> of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), r == q*h in k[t] satisfies A*Dr + B*r == Sum(ci*ggi, (i, 1, m)). For case == 'primitive', k<t> == k[t], so (a, b, G, 1) is returned. The helper real_imag(ba, bd, gen) supports the hypertangent case: it splits a rational function "evaluated at sqrt(-1)" into real and imaginary parts without actually substituting, by separating even and odd power terms according to their degree mod 4, and returns the real part of the numerator, the imaginary part of the numerator, and the denominator. The real version of the structure theorems with hypertangent support is not yet implemented, so that branch raises NotImplementedError.

prde_linear_constraints(a, b, G, DE) generates linear constraints on the constants. Given a derivation D on k[t], a, b in k[t] and G = [g1, ..., gm] in k(t)^m, it returns q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k(t) such that, for any solution c1, ..., cm in Const(k) and p in k[t] of a*Dp + b*p == Sum(ci*gi, (i, 1, m)), (c1, ..., cm) is a solution of M*x == 0, in which case the right hand side equals Sum(ci*qi, (i, 1, m)). The qi are the polynomial components of the gi; the rows of M collect the coefficients of the remaining proper fractions, which have to cancel in any polynomial solution.

poly_linear_constraints(p, d): given p = [p1, ..., pm] in k[t]^m and d in k[t], it returns q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k such that Sum(ci*pi, (i, 1, m)), for c1, ..., cm in k, is divisible by d if and only if (c1, ..., cm) is a solution of M*x == 0, in which case the quotient is Sum(ci*qi, (i, 1, m)).

constant_system(A, u, DE) generates a system for the constant solutions. Given a differential field (K, D) with constant field C = Const(K), a matrix A and a vector u with coefficients in K, it returns a matrix B with coefficients in C and a vector v such that either v has coefficients in C, in which case the solutions in C of A*x == u are exactly the solutions of B*x == v, or v has a non-constant coefficient, in which case A*x == u has no constant solution. This routine is used both when solving the parametric problems and when deciding, via the structure theorem approach, whether an element of K is the derivative of an element of K or the logarithmic derivative of a K-radical. Because Poly does not play well with Matrix yet, the algorithm assumes that all matrix entries are Basic expressions.
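The divisibility test behind poly_linear_constraints is ordinary linear algebra on the coefficients of remainders. A minimal standalone sketch follows; the helper name divisibility_constraints is hypothetical (it is not the module's API) and it assumes deg(d) >= 1 so that every remainder has degree < deg(d).

    from sympy import Matrix, Poly, symbols

    t = symbols('t')

    def divisibility_constraints(ps, d):
        # Sum(ci*pi) is divisible by d iff (c1, ..., cm) lies in the nullspace
        # of M, whose columns hold the coefficients of the remainders pi rem d.
        qs, rs = zip(*[p.div(d) for p in ps])          # pi == qi*d + ri
        M = Matrix(d.degree(), len(ps), lambda i, j: rs[j].nth(i))
        return list(qs), M

    ps = [Poly(t**2 + 1, t), Poly(t - 1, t), Poly(2*t, t)]
    qs, M = divisibility_constraints(ps, Poly(t + 1, t))
    assert M == Matrix([[2, -2, -2]])   # so c1 - c2 - c3 == 0 characterizes divisibility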
The polynomial part of the problem is handled by a parametric version of the usual degree-reduction machinery.

prde_spde(a, b, Q, n, DE) is the Special Polynomial Differential Equation algorithm, parametric version. Given a derivation D on k[t], an integer n and a, b, q1, ..., qm in k[t] with deg(a) > 0 and gcd(a, b) == 1, it returns (A, B, Qq, R, n1), with Qq = [q1, ..., qm] and R = [r1, ..., rm], such that for any solution c1, ..., cm in Const(k) and q in k[t] of degree at most n of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), p = (q - Sum(ci*ri, (i, 1, m)))/a has degree at most n1 and satisfies A*Dp + B*p == Sum(ci*qi, (i, 1, m)).

prde_no_cancel_b_large(b, Q, n, DE) treats the no-cancellation case with deg(b) large enough. Given a derivation D on k[t], n in ZZ and b, q1, ..., qm in k[t] with b != 0 and either D == d/dt or deg(b) > max(0, deg(D) - 1), it returns h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that, if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where d1, ..., dr are in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.

prde_no_cancel_b_small(b, Q, n, DE) gives the same kind of answer in the no-cancellation case with deg(b) small enough: deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2.

prde_cancel_liouvillian(b, Q, n, DE) handles the cancellation cases when the derivation is primitive or hyperexponential (Bronstein, p. 237). The non-linear and hypertangent cancellation cases have not yet been implemented and raise NotImplementedError.

param_poly_rischDE(a, b, q, n, DE) finds polynomial solutions of a parametric Risch differential equation. Given a derivation D in k[t], a, b in k[t] relatively prime and q = [q1, ..., qm] in k[t]^m, it returns h = [h1, ..., hr] in k[t]^r and a matrix A with m + r columns and entries in Const(k) such that a*Dp + b*p == Sum(ci*qi, (i, 1, m)) has a solution p of degree at most n in k[t] with c1, ..., cm in Const(k) if and only if p = Sum(dj*hj, (j, 1, r)), where d1, ..., dr are in Const(k) and (c1, ..., cm, d1, ..., dr) is a solution of A*x == 0.

param_rischDE(fa, fd, G, DE) solves the full parametric Risch differential equation Dy + f*y == Sum(ci*Gi, (i, 1, m)). Given a derivation D in k(t), f in k(t) and G = [G1, ..., Gm] in k(t)^m, it returns h = [h1, ..., hr] in k(t)^r and a matrix A with m + r columns and entries in Const(k) such that the equation has a solution y in k(t) with c1, ..., cm in Const(k) if and only if y = Sum(dj*hj, (j, 1, r)), where d1, ..., dr are in Const(k) and (c1, ..., cm, d1, ..., dr) is a solution of A*x == 0; elements of k(t) are represented as tuples (a, d) with a and d in k[t]. Where a bound on the degree of the solution is needed, bound_degree(..., parametric=True) is used when it is implemented for the given case; otherwise a temporary bound is set (a known TODO in the source).
limited_integrate_reduce(fa, fd, G, DE) is a simpler version of steps 1 and 2 for the limited integration problem. Given a derivation D on k(t) and f, g1, ..., gm in k(t), it returns (a, b, h, N, g, V) such that a, b, h in k[t], N is a non-negative integer, g in k(t) and V = [v1, ..., vm] in k(t)^m, and for any solution v in k(t), c1, ..., cm in Const(k) of f == Dv + Sum(ci*wi), p = v*h is in k<t>, and p and the ci satisfy a*Dp + b*p == g + Sum(ci*vi, (i, 1, m)). Furthermore, if S1irr == Sirr, then p is in k[t], and if t is nonlinear or Liouvillian over k, then deg(p) <= N. The code only covers the cases in which S1irr == Sirr is known to hold; there could be others, and the algorithm would then need to be extended to handle them. It automatically calls bound_degree() when t is linear and non-Liouvillian, which, for the transcendental case, implies that Dt == a*t + b for some a, b in k*.

limited_integrate(fa, fd, G, DE) solves the limited integration problem itself: f == Dv + Sum(ci*wi), i.e. it looks for v in k(t) and constants ci such that f minus the combination Sum(ci*wi) has an antiderivative v in k(t), and returns a solution when one exists.
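Again the defining identity is easy to verify on a hand-picked instance (the particular f, v, ci and wi below are illustrative only, not module output):

    from sympy import symbols, diff, simplify

    x = symbols('x')

    # Limited integration: f == Dv + c1*w1 + c2*w2.
    f = x + 3/x + 1/(x + 1)
    v, (c1, w1), (c2, w2) = x**2/2, (3, 1/x), (1, 1/(x + 1))
    assert simplify(f - (diff(v, x) + c1*w1 + c2*w2)) == 0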
parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None) is the parametric logarithmic derivative heuristic. Given a derivation D on k[t], f in k(t) and a hyperexponential monomial theta over k(t) (passed as w == Dtheta/theta), it either raises NotImplementedError, meaning the heuristic failed, returns None, meaning it has proven that no solution exists, or returns a solution (n, m, v) of the equation n*f == Dv/v + m*Dtheta/theta with v in k(t)* and n, m in ZZ, n != 0. If the heuristic fails, the structure theorem approach has to be used instead.

parametric_log_deriv(fa, fd, wa, wd, DE) is intended to implement the full algorithm via the structure theorems; for now it simply delegates to the heuristic (a TODO noted in the source).
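A concrete check of the equation the heuristic solves, with theta = exp(x) and hand-picked n, m, v (illustrative values, not produced by the module):

    from sympy import exp, symbols, diff, simplify

    x = symbols('x')

    # n*f == Dv/v + m*Dtheta/theta with theta = exp(x), v = x, n = 1, m = 2.
    theta, f, n, m, v = exp(x), 1/x + 2, 1, 2, x
    assert simplify(n*f - (diff(v, x)/v + m*diff(theta, x)/theta)) == 0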
Elements of k(t) are tuples (a, d)", "r columns and entries in Const(k) = Const(k0) # such", "A: return A, u Au = A.row_join(u) Au = Au.rref(simplify=cancel,", "is because they will both behave the same as monomials.", "logarithmic derivative of a k(t)-radical, then all the # roots", "case the sum is Sum(ci*qi). ## Reduce number of constants", "for prde_spde, it will always # terminate no matter what", "need to be extended to handle them. if DE.case in", "r) + Sum(ei*hi, (i, 1, m)), # where ei ==", "p = Sum(ek*hk) where e1, ..., ev are in #", "be computed recursively using this same function. Therefore, it is", "# terminate no matter what n is. n = bound_degree(a,", "i --- = --. --- --- t f i in", "- residue_reduce_derivation(H, DE, z)) p = p.as_poly(DE.t) if p is", "for i, j in residueterms]) return (n, u) elif case", "r, jj: cancel(r - Asj*um1)) A = A.col_join(Rm1) u =", "in j) for i, j in roots): # If f", "# Solutions q in k<t> of a*Dq + b*q =", "== 1. The last condition is handled by cancel() above.", "Risch differential equation. Given a derivation D in k[t], a,", "them as indices to D (or T). L_args are the", "else: raise NotImplementedError(\"non-linear and hypertangent \" \"cases have not yet", "(a/d)*Dp + (b/d)*p = Sum(dj*rj) has a solution p of", "== Dtheta/theta \"\"\" # TODO: finish writing this and write", "of the equation n*f == Dv/v + m*Dtheta/theta, with v", "f = [f1, ..., fr] in k^r and B is", "is a list of tuples such that Mul(*[i**j for i,", "a list of tuples of factions of the terms on", "that f cannot be written as the logarithmic derivative of", "if not V: # No non-trivial solution. return [], eye(m)", "G, DE) # Solutions q in k<t> of a*Dq +", "b*q = Sum(ci*Gi) correspond # to solutions z = q/hn", "of terms on the right hand side of the equation", "k(t). ans is a list of tuples such that Add(*[i*j", "number of rows. Mqq = Matrix([qq]) # A single row.", "DE.t) Y = Ya*Poly(1/Yd.LC(), DE.t), Yd.monic() return Y, C def", "be written as the logarithmic derivative of a k(t)-radical. ans", "# Our assumption here is that each monomial is recursively", "u != 0 such that n*f == Du/u. Either returns", "A = A.col_join(zeros(B.rows, m).row_join(B)) ## Eliminate d1, ..., du. W", "Also note: derivation(basic=True) calls cancel() return None else: if not", ">= 0: # and a, b relatively prime a, b,", "which case the quotient is Sum(ci*qi, (i, 1, m)). \"\"\"", "in denom_real) bd_imag = sum(r for r in denom_imag) num_real", "the outline in the docstring of rde.py for more information.", "k(t). 
a in k(t) is the derivative of an element", "fa*derivation(fd, DE)).cancel(fd**2, include=True) else: dfa, dfd = fa, fd #", "hji = fi[j]*DE.t**i hi[j] = hji # building up Sum(djn*(D(fjn*t^n)", "(i, 1, m)), r == q*h in k[t] satisfies A*Dr", "and d in k[t], return q = [q1, ..., qm]", "(c1, ..., cm, d1, ..., dr) is a solution of", "it at sqrt(-1) Separates the even and odd power terms", "[f1, ..., fu] # # Solve the reduced equation recursively.", "== Sum(ci*qi, (i, 1, m)) then q = Sum(dj*hj, (j,", "max([ri.degree(DE.t) for _, ri in Q]) M = Matrix(N +", "# in the next loop instead of Q it has", "Sum(dj*rj) # where rj = Sum(aji*qi) (j = 1, ...,", "- M.as_poly(DE.t)*wa*fd nfmwd = fd*wd Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE)", "\"\"\" Parametric Risch Differential Equation - Special part of the", "a = a.LC() b, q = b.quo_ground(a), [qi.quo_ground(a) for qi", "B, G, hs = prde_special_denom(a, ba, bd, G, DE) #", "[(q*ga).cancel(gd, include=True) for ga, gd in G] a, (ba, bd),", "c1, ..., cm in C of f == Dv +", "implicit # in the correctness of the Risch Algorithm is", "qi in q]): return [], zeros(1, m) # No constraints.", "for any solution c1, ..., cm in Const(k) and q", "entries in Const(k) such that Dy + f*y = Sum(ci*Gi,", "A.nullspace() V = [v for v in V if v[0]", "derivative of an element of K or the logarithmic derivative", "value in ba.items()] num_imag = [value if key[0] % 4", "[frac_in(q.nth(i), DE.t, field=True) for q in Q] fi, Ai =", "Parametric Risch Differential Equation problem is, given f, g1, ...,", "# Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj) # where", "f cannot be written as the logarithmic derivative of a", "# where ei == ci (i = 1, ..., m),", "v) in Const(k). # The vectors (bl1, ..., blm) generate", "q = [q1, ..., qm] in k[t]^m and a matrix", "derivative of a k(t)-radical. return None Q, v = Qv", "Sum(ci*betai) of the initial equation. d = a.gcd(b) if not", "correspond # to solutions q = p/hs of the previous", "single row. r = [(Mq*vj)[0] for vj in V] #", "G = [G1, ..., Gm] in k(t)^m, return h =", "+ [Pow(b, e*j) for b, e in dterms]))) const =", "b.is_zero and (DE.case == 'base' or b.degree() > max(0, DE.d.degree()", "r == q*h in k[t] satisfies A*Dr + B*r ==", "c1, ..., cm, e1, ..., ev. C = Matrix([ni[:] for", "in k[t]. # (Is there a better way?) f =", "+ [n], S(1)) residueterms = [(i, j*common_denom) for i, j", "1, ..., m + u + v) in Const(k). #", "== y*h in k<t> satisfies a*Dq + b*q == Sum(ci*Gi,", "the hyperexponential, hypertangent, and primitive cases, respectively. For the hyperexponential", "k[t]^m and d in k[t], return q = [q1, ...,", "be empty, but everything below should work find in that", "b.degree(DE.t) m = len(Q) H = [Poly(0, DE.t)]*m for N", "for any solution c1, ..., cm in Const(k) and p", "gk in g] # Build combined relation matrix. A =", "False Ax == u has no constant solution. This algorithm", "derivation D in k[t], a, b in k[t] relatively prime,", "cm, d1, ..., dr]]).T == 0. \"\"\" db = b.degree(DE.t)", "V: # No non-trivial solution. 
return [], eye(m) # Could", "if n < 0: # Only the trivial zero solution", "# a*Dp + b*p = Sum(dj*rj) has a solution p", "raise NotImplementedError(\"Cannot work with non-rational \" \"coefficients in this case.\")", "[gia.cancel(gid*g, include=True) for gia, gid in G] # a*Dp +", "for i, j in residueterms] m = common_denom//n if common_denom", "= real_imag(ba, bd*a, DE.t) betad = alphad etaa, etad =", "v in k(t)* and n, m in ZZ with n", "case == 'exp': dcoeff = DE.d.quo(Poly(DE.t, DE.t)) with DecrementLevel(DE): #", "might potentially be wrong. raise NotImplementedError(\"Cannot work with non-rational \"", "return [], eye(m) Mq = Matrix([q]) # A single row.", "0 such that n*b == Du/u. Either returns (ans, u,", "over C(x)), and E_K/C(x) = { i in {1, ...,", "function uses the structure theorem approach, which says that for", "No cancellation: deg(b) large enough. Given a derivation D on", "Mq = Matrix([q]) # A single row. r = [(Mq*vj)[0]", "a = dn*h c = a*h ba = a*fa -", "proven that no solution exists, or returns a solution (n,", "happen for the # functions given when solving the parametric", "the sum is Sum(ci*qi). ## Reduce number of constants at", "to such by cancel(). Therefore, a careful user can avoid", "k(t), c1, ..., cm in C of f == Dv", "case s is False Ax == u has no constant", "= (fa*l.quo(fd)).div(z) # (l*f).div(z) u2, r2 = (wa*l.quo(wd)).div(z) # (l*w).div(z)", "that all matrix entries are Basic expressions. \"\"\" if not", "b, q, r, n = prde_spde(a, b, q, n, DE)", "is divisible by d with exact quotient Sum(aji*qqi). # Sum(ci*qi)", "it will always # terminate no matter what n is.", "Const(k). # The vectors (bl1, ..., blm) generate the space", "Also, there should be an option to continue # anyway,", "_, j in residueterms]] + [n], S(1)) residueterms = [(i,", "A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE) if A is", "= [value if key[0] % 4 == 0 else -value", "b.quo(d), r, n, DE) # g = [g1, ..., gv]", "need to be used. The argument w == Dtheta/theta \"\"\"", "derivation(A[i, j], DE, basic=True)) for s in range(A.rows): # A[s,", "raise ValueError(\"The %s case is not supported in this function.\"", "might be empty, but everything below should work find in", "for c1, ..., cm in Const(k) # if and only", "matrix with # m + r columns and entries in", "m)))/a has degree at most n1 and satisfies A*Dp +", "for i in u): raise NotImplementedError(\"Cannot work with non-rational \"", "Poly(0, DE.t) Fd = Poly(1, DE.t) G = [(fa, fd)]", "DE.t) - order_at(Gd, p, DE.t) for Ga, Gd in G])", "in Const(k) if and only if y = Sum(dj*hj, (j,", "== Dv/v + m*Dtheta/theta, with v in k(t)* and n,", "the empty matrix. qs, _ = list(zip(*Q)) return (qs, M)", "0, # in which case the quotient is Sum(fi*qqi). A,", "frac_in(p, DE.t) A = is_log_deriv_k_t_radical_in_field(pa, pd, DE, case='auto') if A", "B = ba.quo(bd) return (a, B, G, Poly(1, DE.t)) else:", "will be the same as u up to a multiplicative", "i, j: i.lcm(j), (ds,) + Es) # lcm(ds, es1, ...,", "(i.e., the set of all indices of logarithmic monomials of", "\"\"\" m = len(p) q, r = zip(*[pi.div(d) for pi", "lambda i, j: Q[j][1].nth(i)) else: M = Matrix(0, m, [])", "and gcd(a, t) == 1 (resp. 
gcd(a, t**2 + 1)", "the cancel() function, in order to prevent a speed bottleneck", "1, m, lambda i, j: Q[j].nth(i)) A, u = constant_system(M,", "1, DE.t) elif case in ['primitive', 'base']: B = ba.quo(bd)", "= frac_in(b + i*derivation(DE.t, DE)/DE.t, DE.t, field=True) with DecrementLevel(DE): Qy", "n terms = ([DE.T[i] for i in DE.indices('exp')] + [DE.extargs[i]", "K-radical using the structure theorem approach. Because Poly does not", "M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i))", "qm] in k[t]^m, return h = [h1, ..., hr] in", "== Sum(ci*ggi, (i, 1, m)). For case == 'primitive', k<t>", "Sum(ci*gi, (i, 1, m)), q == y*h in k<t> satisfies", "to Sum(fi*qi). M, _ = constant_system(M, zeros(M.rows, 1), DE) #", "and non-Liouvillian, which for the transcendental case, implies that Dt", "qi in q] if not b.is_zero and (DE.case == 'base'", "finish writing this and write tests c1 = c1 or", "with DecrementLevel(DE): pa, pd = frac_in(p, DE.t) A = is_log_deriv_k_t_radical_in_field(pa,", "and bd is the denominator of the rational function. \"\"\"", "only if M*Matrix([f1, ..., fm]) == 0, # in which", "tuple (a, b, G, h) such that a, h in", "if i is in L_K, then T[i] == log(L_args[i])). This", "for _, i in residueterms], S(1)) u = Mul(*[Pow(i, j*n)", "dn.gcd(en) h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t))) a = dn*h c = a*h", "M*wa*fd nfmwd = fd*wd Qv = is_log_deriv_k_t_radical_in_field(N*fa*wd - M*wa*fd, fd*wd,", "the same as those of # (a/d)*Dp + (b/d)*p =", "and if t is nonlinear or Liouvillian over k, then", "Risch Differential Equation - Normal part of the denominator. Given", "result = Add(*[Mul(i, j) for i, j in ans]) argterms", "it is necessary to pass the arguments of the logarithmic", "constant. argterms = ([DE.extargs[i] for i in DE.indices('exp')] + [DE.T[i]", "correspond to solutions y = z/q of the original equation.", "vj in V: A = A.row_join(vj) A = A.row_join(zeros(m, len(h)))", "..., 0] for i in range(m): si = Q[i].nth(N +", "removed. # the currently added test case takes large time", "Mul(*[Pow(i, j*n) for i, j in residueterms]) return (n, u)", "in E i K/C(x) K/C(x) Where C = Const(K), L_K/C(x)", "the answer should be # None in this case. This", "b*q == Sum(ci*gi, (i, 1, m)), r == q*h in", "i in range(z.degree(DE.t))] s = solve(eqs, c1) if not s", "work with non-rational \" \"coefficients in this case.\") else: terms", "m)) has a solution y in k(t) with c1, ...,", "such that a = Db. Either returns (ans, u), such", "k(t}. The corresponding solutions are # y = Sum(blk'*hk, (k,", "ajm in Const(k). # Sum(aji*gi) is in k[t] and equal", "a derivative of an element of K or the logarithmic", "as indices to D (or T). L_args are the arguments", "to 0. The danger is that we might # incorrectly", "in k(t) is the derivative of an element of k(t)", "b*p = Sum(ci*qi, (i, 1, m)) has a solution p", "= Poly(1, DE.t) G = [(fa, fd)] + G h,", "if p = Sum(ek*hk) where e1, ..., ev are in", "on k[t], f in k(t), and a hyperexponential monomial theta", "that case, # Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj)", "constant_system(M, zeros(d, 1), DE) else: # No constraints on the", "return (a, B, G, Poly(1, DE.t)) else: raise ValueError(\"case must", "if residueterms = [], returns (1, 1) # f had", "this will need to be updated to call bound_degree() #", "Sum(ci*vi, (i, 1, m)). Furthermore, if S1irr == Sirr, then", "exist n in ZZ and u in k(t) with n,", "Fi, hi = [None]*ri, [None]*ri # from eq. 


def real_imag(ba, bd, gen):
    """
    Helper function, to get the real and imaginary part of a rational function
    evaluated at sqrt(-1) without actually evaluating it at sqrt(-1).

    Separates the even and odd power terms by checking the degree of terms wrt
    mod 4.  Returns a tuple (ba[0], ba[1], bd) where ba[0] is the real part of
    the numerator, ba[1] is the imaginary part and bd is the denominator of the
    rational function.
    """
    bd = bd.as_poly(gen).as_dict()
    ba = ba.as_poly(gen).as_dict()
    denom_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2
        else 0 for key, value in bd.items()]
    denom_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3
        else 0 for key, value in bd.items()]
    bd_real = sum(r for r in denom_real)
    bd_imag = sum(r for r in denom_imag)
    num_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2
        else 0 for key, value in ba.items()]
    num_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3
        else 0 for key, value in ba.items()]
    ba_real = sum(r for r in num_real)
    ba_imag = sum(r for r in num_imag)
    ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen),
          (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen))
    bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen)
    return (ba[0], ba[1], bd)
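

# A minimal sanity check for real_imag() (illustrative sketch, not part of
# the original module).  (t**2 + t + 1)/(t + 1) evaluated at t = sqrt(-1)
# is (i**2 + i + 1)/(i + 1) == i/(i + 1) == (1 + i)/2, so the real and
# imaginary numerators are both 1 over the common denominator 2:
#
#     >>> from sympy import Poly
#     >>> from sympy.abc import t
#     >>> real_imag(Poly(t**2 + t + 1, t), Poly(t + 1, t), t)
#
# should print (Poly(1, t, domain='ZZ'), Poly(1, t, domain='ZZ'),
# Poly(2, t, domain='ZZ')).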


def prde_special_denom(a, ba, bd, G, DE, case='auto'):
    """
    Parametric Risch Differential Equation - Special part of the denominator.

    case is one of {'exp', 'tan', 'primitive'} for the hyperexponential,
    hypertangent, and primitive cases, respectively.  For the hyperexponential
    (resp. hypertangent) case, given a derivation D on k[t] and a in k[t],
    b in k<t>, and g1, ..., gm in k(t) with Dt/t in k (resp. Dt/(t**2 + 1) in
    k, sqrt(-1) not in k), a != 0, and gcd(a, t) == 1 (resp.
    gcd(a, t**2 + 1) == 1), return the tuple (A, B, GG, h) such that A, B, h in
    k[t], GG = [gg1, ..., ggm] in k(t)^m, and for any solution c1, ..., cm in
    Const(k) and q in k<t> of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), r == q*h in
    k[t] satisfies A*Dr + B*r == Sum(ci*ggi, (i, 1, m)).

    For case == 'primitive', k<t> == k[t], so it returns (a, b, G, 1) in this
    case.
    """
    # TODO: Merge this with the very similar special_denom() in rde.py
    if case == 'auto':
        case = DE.case

    if case == 'exp':
        p = Poly(DE.t, DE.t)
    elif case == 'tan':
        p = Poly(DE.t**2 + 1, DE.t)
    elif case in ['primitive', 'base']:
        B = ba.quo(bd)
        return (a, B, G, Poly(1, DE.t))
    else:
        raise ValueError("case must be one of {'exp', 'tan', 'primitive', "
            "'base'}, not %s." % case)

    nb = order_at(ba, p, DE.t) - order_at(bd, p, DE.t)
    nc = min([order_at(Ga, p, DE.t) - order_at(Gd, p, DE.t) for Ga, Gd in G])
    n = min(0, nc - min(0, nb))

    if not nb:
        # Possible cancellation.
        if case == 'exp':
            dcoeff = DE.d.quo(Poly(DE.t, DE.t))
            with DecrementLevel(DE):
                # We are guaranteed to not have problems,
                # because case != 'base'.
                alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t)
                etaa, etad = frac_in(dcoeff, DE.t)
                A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)
                if A is not None:
                    Q, m, z = A
                    if Q == 1:
                        n = min(n, m)

        elif case == 'tan':
            dcoeff = DE.d.quo(Poly(DE.t**2 + 1, DE.t))
            with DecrementLevel(DE):
                betaa, alphaa, alphad = real_imag(ba, bd*a, DE.t)
                betad = alphad
                etaa, etad = frac_in(dcoeff, DE.t)
                if recognize_log_derivative(2*betaa, betad, DE):
                    A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)
                    B = parametric_log_deriv(betaa, betad, etaa, etad, DE)
                    if A is not None and B is not None:
                        Q, s, z = A
                        # TODO: Add test
                        if Q == 1:
                            n = min(n, s/2)

    N = max(0, -nb)
    pN = p**N
    pn = p**-n  # This is 1/h.

    A = a*pN
    B = ba*pN.quo(bd) + Poly(n, DE.t)*a*derivation(p, DE).quo(p)*pN
    G = [(Ga*pN*pn).cancel(Gd, include=True) for Ga, Gd in G]
    h = pn

    # (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N - n), ..., gm*p**(N - n), p**-n)
    return (A, B, G, h)


def prde_linear_constraints(a, b, G, DE):
    """
    Parametric Risch Differential Equation - Generate linear constraints on the
    constants.

    Given a derivation D on k[t], a, b, in k[t] with gcd(a, b) == 1, and
    G = [g1, ..., gm] in k(t)^m, return Q = [q1, ..., qm] in k[t]^m and a
    matrix M with entries in k(t) such that for any solution c1, ..., cm in
    Const(k) and p in k[t] of a*Dp + b*p == Sum(ci*gi, (i, 1, m)),
    (c1, ..., cm) is a solution of Mx == 0, and p and the ci satisfy
    a*Dp + b*p == Sum(ci*qi, (i, 1, m)).

    Because M has entries in k(t), and because Matrix doesn't play well with
    Poly, M will be a Matrix of Basic expressions.
    """
    m = len(G)

    Gns, Gds = list(zip(*G))
    d = reduce(lambda i, j: i.lcm(j), Gds)
    d = Poly(d, field=True)
    Q = [(ga*(d).quo(gd)).div(d) for ga, gd in G]

    if not all([ri.is_zero for _, ri in Q]):
        N = max([qi.degree(DE.t) for _, qi in Q])
        M = Matrix(N + 1, m, lambda i, j: Q[j][1].nth(i))
    else:
        M = Matrix(0, m, [])  # No constraints, return the empty matrix.

    qs, _ = list(zip(*Q))
    return (qs, M)
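

# Illustrative sketch (assumed setup, not from the original module): with
# D = d/dx on QQ(x) and G = [1/x], the polynomial component of c1*(1/x) is 0
# and the remainder 1/x forces the single constraint c1 == 0:
#
#     >>> from sympy import Poly
#     >>> from sympy.abc import x
#     >>> from sympy.integrals.risch import DifferentialExtension
#     >>> DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
#     >>> prde_linear_constraints(Poly(1, x), Poly(0, x),
#     ...     [(Poly(1, x), Poly(x, x))], DE)
#
# which returns q == (Poly(0, x),) together with the 1 x 1 matrix [[1]],
# i.e. the constraint 1*c1 == 0.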


def poly_linear_constraints(p, d):
    """
    Given p = [p1, ..., pm] in k[t]^m and d in k[t], return
    q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k such that
    Sum(ci*pi, (i, 1, m)), for c1, ..., cm in k, is divisible by d if and only
    if (c1, ..., cm) is a solution of Mx == 0, in which case the quotient is
    Sum(ci*qi, (i, 1, m)).
    """
    m = len(p)
    q, r = zip(*[pi.div(d) for pi in p])

    if not all([ri.is_zero for ri in r]):
        n = max([ri.degree() for ri in r])
        M = Matrix(n + 1, m, lambda i, j: r[j].nth(i))
    else:
        M = Matrix(0, m, [])  # No constraints.

    return q, M
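

# Illustrative sketch (not from the original module): c1*(x**2 + 1) + c2*x
# is divisible by d = x exactly when c1 == 0, with quotient c2*1:
#
#     >>> from sympy import Poly
#     >>> from sympy.abc import x
#     >>> q, M = poly_linear_constraints([Poly(x**2 + 1, x), Poly(x, x)],
#     ...     Poly(x, x))
#
# Here q == (Poly(x, x), Poly(1, x)) holds the quotients and M is the single
# row [1, 0], encoding the constraint c1 == 0.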


def constant_system(A, u, DE):
    """
    Generate a system for the constant solutions.

    Given a differential field (K, D) with constant field C = Const(K), a
    Matrix A, and a vector (Matrix) u with coefficients in K, returns the tuple
    (B, v, s), where B is a Matrix with coefficients in C and v is a vector
    (Matrix) such that either v has coefficients in C, in which case s is True
    and the solutions in C of Ax == u are exactly all the solutions of
    Bx == v, or v has a non-constant coefficient, in which case s is False and
    Ax == u has no constant solution.

    This algorithm is used both in solving parametric problems and in
    determining if an element a of K is a derivative of an element of K or the
    logarithmic derivative of a K-radical using the structure theorem approach.

    Because Poly does not play well with Matrix yet, this algorithm assumes
    that all matrix entries are Basic expressions.
    """
    if not A:
        return A, u
    Au = A.row_join(u)
    Au = Au.rref(simplify=cancel, normalize_last=False)[0]
    # Warning: This will NOT return correct results if cancel() cannot reduce
    # an identically zero expression to 0.  The danger is that we might
    # incorrectly prove that an integral is nonelementary (such as
    # risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x)).
    # But this is a limitation in computer algebra in general, and implicit
    # in the correctness of the Risch Algorithm is the computability of the
    # constant field (actually, this same correctness problem exists in any
    # algorithm that uses rref()).
    #
    # We therefore limit ourselves to constant fields that are computable
    # via the cancel() function, in order to prevent a speed bottleneck from
    # calling some more complex simplification function (rational function
    # coefficients will fall into this class).  Furthermore, (I believe) this
    # problem will only crop up if the integral explicitly contains an
    # expression in the constant field that is identically zero, but cannot
    # be reduced to such by cancel().  Therefore, a careful user can avoid this
    # problem entirely by being careful with the sorts of expressions that
    # appear in his integrand in the variables other than the integration
    # variable (the structure theorems should be able to completely decide
    # these problems in the integration variable).

    Au = Au.applyfunc(cancel)
    A, u = Au[:, :-1], Au[:, -1]

    for j in range(A.cols):
        for i in range(A.rows):
            if A[i, j].has(*DE.T):
                # This assumes that const(F(t0, ..., tn)) == const(K) == F
                Ri = A[i, :]  # Rm+1; m = A.rows
                Rm1 = Ri.applyfunc(lambda x: derivation(x, DE, basic=True)/
                        derivation(A[i, j], DE, basic=True))
                Rm1 = Rm1.applyfunc(cancel)
                um1 = cancel(derivation(u[i], DE, basic=True)/
                        derivation(A[i, j], DE, basic=True))

                for s in range(A.rows):
                    # A[s, :] = A[s, :] - A[s, i]*A[:, m+1]
                    Asj = A[s, j]
                    A.row_op(s, lambda r, jj: cancel(r - Asj*Rm1[jj]))
                    # u[s] = u[s] - A[s, j]*u[m+1]
                    u.row_op(s, lambda r, jj: cancel(r - Asj*um1))

                A = A.col_join(Rm1)
                u = u.col_join(Matrix([um1]))

    return (A, u)
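

# A small worked example of constant_system() (illustrative, assuming the
# base extension K = QQ(x) with D = d/dx): the system
#     x*c1 + c2 == x,  x*c2 == 0
# has the constant solution c1 == 1, c2 == 0, and row reduction already
# yields a constant system:
#
#     >>> from sympy import Matrix, Poly
#     >>> from sympy.abc import x
#     >>> from sympy.integrals.risch import DifferentialExtension
#     >>> DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
#     >>> constant_system(Matrix([[x, 1], [0, x]]), Matrix([x, 0]), DE)
#
# which returns (eye(2), Matrix([1, 0])), i.e. c1 == 1 and c2 == 0.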


def prde_spde(a, b, Q, n, DE):
    """
    Special Polynomial Differential Equation algorithm: Parametric Version.

    Given a derivation D on k[t], an integer n, and a, b, q1, ..., qm in k[t]
    with deg(a) > 0 and gcd(a, b) == 1, return (A, B, Q, R, n1), with
    Qq = [q1, ..., qm] and R = [r1, ..., rm], such that for any solution
    c1, ..., cm in Const(k) and q in k[t] of degree at most n of
    a*Dq + b*q == Sum(ci*gi, (i, 1, m)), p = (q - Sum(ci*ri, (i, 1, m)))/a has
    degree at most n1 and satisfies A*Dp + B*p == Sum(ci*qi, (i, 1, m)).
    """
    R, Z = list(zip(*[gcdex_diophantine(b, a, qi) for qi in Q]))

    A = a
    B = b + derivation(a, DE)
    Qq = [zi - derivation(ri, DE) for ri, zi in zip(R, Z)]
    R = list(R)
    n1 = n - a.degree(DE.t)

    return (A, B, Qq, R, n1)
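

# Worked sketch of one SPDE step (illustrative, assuming the base extension
# D = d/dx on QQ(x)): for a = x, b = 1, Q = [x] and degree bound n = 2,
# gcdex_diophantine(1, x, x) gives r = 0 and z = 1, so
#
#     prde_spde(Poly(x, x), Poly(1, x), [Poly(x, x)], 2, DE)
#
# returns (x, 2, [1], [0], 1): solutions q with deg(q) <= 2 of
# x*Dq + q == c1*x correspond to p = q/x with deg(p) <= 1 satisfying
# x*Dp + 2*p == c1, since x*D(x*p) + x*p == x*(x*Dp + 2*p).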


def prde_no_cancel_b_large(b, Q, n, DE):
    """
    Parametric Poly Risch Differential Equation - No cancellation: deg(b) large
    enough.

    Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
    b != 0 and either D == d/dt or deg(b) > max(0, deg(D) - 1), returns
    h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that
    if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
    Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where
    d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
    """
    db = b.degree(DE.t)
    m = len(Q)
    H = [Poly(0, DE.t)]*m

    for N in range(n, -1, -1):  # [n, ..., 0]
        for i in range(m):
            si = Q[i].nth(N + db)/b.LC()
            sitn = Poly(si*DE.t**N, DE.t)
            H[i] = H[i] + sitn
            Q[i] = Q[i] - derivation(sitn, DE) - b*sitn

    if all(qi.is_zero for qi in Q):
        dc = -1
        M = zeros(0, 2)
    else:
        dc = max([qi.degree(DE.t) for qi in Q])
        M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i))
    A, u = constant_system(M, zeros(dc + 1, 1), DE)
    c = eye(m)
    A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c))

    return (H, A)


def prde_no_cancel_b_small(b, Q, n, DE):
    """
    Parametric Poly Risch Differential Equation - No cancellation: deg(b) small
    enough.

    Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
    deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2, returns
    h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that
    if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
    Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where
    d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
    """
    m = len(Q)
    H = [Poly(0, DE.t)]*m

    for N in range(n, 0, -1):  # [n, ..., 1]
        for i in range(m):
            si = Q[i].nth(N + DE.d.degree(DE.t) - 1)/(N*DE.d.LC())
            sitn = Poly(si*DE.t**N, DE.t)
            H[i] = H[i] + sitn
            Q[i] = Q[i] - derivation(sitn, DE) - b*sitn

    if b.degree(DE.t) > 0:
        for i in range(m):
            si = Poly(Q[i].nth(b.degree(DE.t))/b.LC(), DE.t)
            H[i] = H[i] + si
            Q[i] = Q[i] - derivation(si, DE) - b*si
        if all(qi.is_zero for qi in Q):
            dc = -1
            M = Matrix()
        else:
            dc = max([qi.degree(DE.t) for qi in Q])
            M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i))
        A, u = constant_system(M, zeros(dc + 1, 1), DE)
        c = eye(m)
        A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c))
        return (H, A)

    # else: b is in k, deg(qi) < deg(Dt)

    t = DE.t
    if DE.case != 'base':
        with DecrementLevel(DE):
            t0 = DE.t  # k = k0(t0)
            ba, bd = frac_in(b, t0, field=True)
            Q0 = [frac_in(qi.TC(), t0, field=True) for qi in Q]
            f, B = param_rischDE(ba, bd, Q0, DE)

            # f = [f1, ..., fr] in k^r and B is a matrix with
            # m + r columns and entries in Const(k) = Const(k0)
            # such that Dy0 + b*y0 = Sum(ci*qi, (i, 1, m)) has
            # a solution y0 in k with c1, ..., cm in Const(k)
            # if and only if y0 = Sum(dj*fj, (j, 1, r)), where
            # d1, ..., dr are in Const(k) and
            # B*Matrix([c1, ..., cm, d1, ..., dr]) == 0.

        # Transform fractions (fa, fd) in f into constant
        # polynomials fa/fd in k[t].
        # (Is there a better way?)
        f = [Poly(fa.as_expr()/fd.as_expr(), t, field=True)
             for fa, fd in f]
    else:
        # Base case.  Dy == 0 for all y in k and b == 0.
        # Dy + b*y = Sum(ci*qi) is solvable if and only if
        # Sum(ci*qi) == 0, in which case the solutions are
        # y = d1*f1 for f1 = 1 and any d1 in Const(k) = k.

        f = [Poly(1, t, field=True)]  # r = 1
        B = Matrix([[qi.TC() for qi in Q] + [S(0)]])
        # The condition for solvability is
        # B*Matrix([c1, ..., cm, d1]) == 0.
        # There are no constraints on d1.

    # Coefficients of t^j (j > 0) in Sum(ci*qi) must be zero.
    d = max([qi.degree(DE.t) for qi in Q])
    if d > 0:
        M = Matrix(d, m, lambda i, j: Q[j].nth(i + 1))
        A, _ = constant_system(M, zeros(d, 1), DE)
    else:
        # No constraints on the ci.
        A = Matrix(0, m, [])

    # Solutions of the original equation are
    #     y = Sum(dj*fj, (j, 1, r)) + Sum(ei*Hi, (i, 1, m)),
    # where ei == ci (i = 1, ..., m), when
    #     A*Matrix([c1, ..., cm]) == 0 and
    #     B*Matrix([c1, ..., cm, d1, ..., dr]) == 0.

    # Build combined constraint matrix with m + r + m columns.

    r = len(f)
    I = eye(m)
    A = A.row_join(zeros(A.rows, r + m))
    B = B.row_join(zeros(B.rows, m))
    C = I.row_join(zeros(m, r)).row_join(-I)

    return f + H, A.col_join(B).col_join(C)


def prde_cancel_liouvillian(b, Q, n, DE):
    """
    Pg. 237.
    """
    H = []

    # Why use DecrementLevel? Below line answers that:
    # Assuming that we can solve such problems over k (not k[t]).
    if DE.case == 'primitive':
        with DecrementLevel(DE):
            ba, bd = frac_in(b, DE.t, field=True)

    for i in range(n, -1, -1):
        if DE.case == 'exp':
            # this re-checking can be avoided
            with DecrementLevel(DE):
                ba, bd = frac_in(b + i*derivation(DE.t, DE)/DE.t,
                                DE.t, field=True)
        with DecrementLevel(DE):
            Qy = [frac_in(q.nth(i), DE.t, field=True) for q in Q]
            fi, Ai = param_rischDE(ba, bd, Qy, DE)
        fi = [Poly(fa.as_expr()/fd.as_expr(), DE.t, field=True)
                for fa, fd in fi]

        ri = len(fi)

        if i == n:
            M = Ai
        else:
            M = Ai.col_join(M.row_join(zeros(M.rows, ri)))

        Fi, hi = [None]*ri, [None]*ri

        # from eq. on top of p.238 (unnumbered)
        for j in range(ri):
            hji = fi[j]*DE.t**i
            hi[j] = hji
            # building up Sum(djn*(D(fjn*t^n) - b*fjn*t^n))
            Fi[j] = -(derivation(hji, DE) - b*hji)

        H += hi
        # in the next loop instead of Q it has
        # to be Q + Fi taking its place
        Q = Q + Fi

    return (H, M)


def param_poly_rischDE(a, b, q, n, DE):
    """
    Polynomial solutions of a parametric Risch differential equation.

    Given a derivation D in k[t], a, b in k[t] relatively prime, and
    q = [q1, ..., qm] in k[t]^m, return h = [h1, ..., hr] in k[t]^r and a
    matrix A with m + r columns and entries in Const(k) such that
    a*Dp + b*p = Sum(ci*qi, (i, 1, m)) has a solution p of degree <= n in k[t]
    with c1, ..., cm in Const(k) if and only if p = Sum(dj*hj, (j, 1, r)),
    where d1, ..., dr are in Const(k) and (c1, ..., cm, d1, ..., dr) is a
    solution of Ax == 0.
    """
    m = len(q)
    if n < 0:
        # Only the trivial zero solution is possible.
        # Find relations between the qi.
        if all([qi.is_zero for qi in q]):
            return [], zeros(1, m)  # No constraints.

        N = max([qi.degree(DE.t) for qi in q])
        M = Matrix(N + 1, m, lambda i, j: q[j].nth(i))
        A, _ = constant_system(M, zeros(M.rows, 1), DE)

        return [], A

    if a.is_ground:
        # Normalization: a = 1.
        a = a.LC()
        b, q = b.quo_ground(a), [qi.quo_ground(a) for qi in q]

        if not b.is_zero and (DE.case == 'base' or
                b.degree() > max(0, DE.d.degree() - 1)):
            return prde_no_cancel_b_large(b, q, n, DE)

        elif ((b.is_zero or b.degree() < DE.d.degree() - 1)
                and (DE.case == 'base' or DE.d.degree() >= 2)):
            return prde_no_cancel_b_small(b, q, n, DE)

        elif (DE.d.degree() >= 2 and
              b.degree() == DE.d.degree() - 1 and
              n > -b.as_poly().LC()/DE.d.as_poly().LC()):
            raise NotImplementedError("prde_no_cancel_b_equal() is "
                "not yet implemented.")

        else:
            # Liouvillian cases
            if DE.case == 'primitive' or DE.case == 'exp':
                return prde_cancel_liouvillian(b, q, n, DE)
            else:
                raise NotImplementedError("non-linear and hypertangent "
                        "cases have not yet been implemented")

    # else: deg(a) > 0

    # Iterate SPDE as long as possible, cumulating coefficients
    # and terms for the recovery of original solutions.
    alpha, beta = 1, [0]*m
    while n >= 0:  # and a, b relatively prime
        a, b, q, r, n = prde_spde(a, b, q, n, DE)
        beta = [betai + alpha*ri for betai, ri in zip(beta, r)]
        alpha *= a
        # Solutions p of a*Dp + b*p = Sum(ci*qi) correspond to
        # solutions alpha*p + Sum(ci*betai) of the initial equation.
        d = a.gcd(b)
        if not d.is_ground:
            break

    # a*Dp + b*p = Sum(ci*qi) may have a polynomial solution
    # only if the sum is divisible by d.

    qq, M = poly_linear_constraints(q, d)
    # qq = [qq1, ..., qqm] where qqi = qi.quo(d).
    # M is a matrix with m columns and entries in k.
    # Sum(fi*qi, (i, 1, m)), where f1, ..., fm are elements of k, is
    # divisible by d if and only if M*Matrix([f1, ..., fm]) == 0,
    # in which case the quotient is Sum(fi*qqi).

    A, _ = constant_system(M, zeros(M.rows, 1), DE)
    # A is a matrix with m columns and entries in Const(k).
    # Sum(ci*qqi) is Sum(ci*qi).quo(d), and the remainder is zero
    # for c1, ..., cm in Const(k) if and only if
    # A*Matrix([c1, ..., cm]) == 0.

    V = A.nullspace()
    # V = [v1, ..., vu] where each vj is a column matrix with
    # entries aj1, ..., ajm in Const(k).
    # Sum(aji*qi) is divisible by d with exact quotient Sum(aji*qqi).
    # Sum(ci*qi) is divisible by d if and only if ci = Sum(dj*aji)
    # (i = 1, ..., m) for some d1, ..., du in Const(k).
    # In that case, solutions of
    #     a*Dp + b*p = Sum(ci*qi)
    # are the same as those of
    #     (a/d)*Dp + (b/d)*p = Sum(dj*rj)
    # where rj = Sum(aji*qqi).

    if not V:
        # No non-trivial solution.
        return [], eye(m)  # Could return A, but this has
                           # the minimum number of rows.

    Mqq = Matrix([qq])  # A single row.
    r = [(Mqq*vj)[0] for vj in V]  # [r1, ..., ru]

    # Solutions of (a/d)*Dp + (b/d)*p = Sum(dj*rj) correspond to
    # solutions alpha*p + Sum(Sum(dj*aji)*betai) of the initial
    # equation.  These are equal to alpha*p + Sum(dj*fj) where
    # fj = Sum(aji*betai).
    Mbeta = Matrix([beta])
    f = [(Mbeta*vj)[0] for vj in V]  # [f1, ..., fu]
    ## Solve the reduced equation recursively.

    g, B = param_poly_rischDE(a.quo(d), b.quo(d), r, n, DE)

    # g = [g1, ..., gv] in k[t]^v and B is a matrix with u + v
    # columns and entries in Const(k) such that
    # (a/d)*Dp + (b/d)*p = Sum(dj*rj) has a solution p of degree <= n
    # in k[t] with d1, ..., du in Const(k) if and only if
    # p = Sum(ek*gk) where e1, ..., ev are in Const(k) and
    # B*Matrix([d1, ..., du, e1, ..., ev]) == 0.
    # The solutions of the original equation are then
    #     Sum(dj*fj, (j, 1, u)) + alpha*Sum(ek*gk, (k, 1, v)).

    # Collect solution components.
    h = f + [alpha*gk for gk in g]

    # Build combined relation matrix.
    A = -eye(m)
    for vj in V:
        A = A.row_join(vj)
    A = A.row_join(zeros(m, len(g)))
    A = A.col_join(zeros(B.rows, m).row_join(B))

    return h, A


def param_rischDE(fa, fd, G, DE):
    """
    Solve a Parametric Risch Differential Equation:
    Dy + f*y == Sum(ci*Gi, (i, 1, m)).

    Given a derivation D in k(t), f in k(t), and G = [G1, ..., Gm] in k(t)^m,
    return h = [h1, ..., hr] in k(t)^r and a matrix A with m + r columns and
    entries in Const(k) such that Dy + f*y == Sum(ci*Gi, (i, 1, m)) has a
    solution y in k(t) with c1, ..., cm in Const(k) if and only if
    y = Sum(dj*hj, (j, 1, r)), where d1, ..., dr are in Const(k) and
    (c1, ..., cm, d1, ..., dr) is a solution of Ax == 0.

    Elements of k(t) are tuples (a, d) with a and d in k[t].
    """
    m = len(G)
    q, (fa, fd) = weak_normalizer(fa, fd, DE)
    # Solutions of the weakly normalized equation Dz + f*z = q*Sum(ci*Gi)
    # correspond to solutions y = z/q of the original equation.
    gamma = q
    G = [(q*ga).cancel(gd, include=True) for ga, gd in G]

    a, (ba, bd), G, hn = prde_normal_denom(fa, fd, G, DE)
    # Solutions q in k<t> of  a*Dq + b*q = Sum(ci*Gi)  correspond
    # to solutions z = q/hn of the weakly normalized equation.
    gamma *= hn

    A, B, G, hs = prde_special_denom(a, ba, bd, G, DE)
    # Solutions p in k[t] of  A*Dp + B*p = Sum(ci*Gi)  correspond
    # to solutions q = p/hs of the previous equation.
    gamma *= hs

    g = A.gcd(B)
    a, b, g = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for
        gia, gid in G]

    # a*Dp + b*p = Sum(ci*gi) may have a polynomial solution
    # only if the sum is in k[t].

    q, M = prde_linear_constraints(a, b, g, DE)

    # q = [q1, ..., qm] where qi in k[t] is the polynomial component
    # of the partial fraction expansion of gi.
    # M is a matrix with m columns and entries in k(t).
    # Sum(fi*gi, (i, 1, m)), where f1, ..., fm are elements of k,
    # is a polynomial if and only if M*Matrix([f1, ..., fm]) == 0,
    # in which case the sum is equal to Sum(fi*qi).

    M, _ = constant_system(M, zeros(M.rows, 1), DE)

    # M is a matrix with m columns and entries in Const(k).
    # Sum(ci*gi) is in k[t] for c1, ..., cm in Const(k)
    # if and only if M*Matrix([c1, ..., cm]) == 0,
    # in which case the sum is Sum(ci*qi).

    ## Reduce number of constants at this point.

    V = M.nullspace()

    # V = [v1, ..., vu] where each vj is a column matrix with
    # entries aj1, ..., ajm in Const(k).
    # Sum(aji*gi) is in k[t] and equal to Sum(aji*qi) (j = 1, ..., u).
    # Sum(ci*gi) is in k[t] if and only if ci = Sum(dj*aji)
    # (i = 1, ..., m) for some d1, ..., du in Const(k).
    # In that case,
    #     Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj)
    # where rj = Sum(aji*qi) (j = 1, ..., u) in k[t].

    if not V:  # No non-trivial solution.
        return [], eye(m)

    Mq = Matrix([q])  # A single row.
    r = [(Mq*vj)[0] for vj in V]  # [r1, ..., ru]

    # Solutions of  a*Dp + b*p = Sum(dj*rj)  correspond to solutions
    # y = p/gamma of the initial equation with ci = Sum(dj*aji).

    try:
        # We try n=5.  At least for prde_spde, it will always
        # terminate no matter what n is.
        n = bound_degree(a, b, r, DE, parametric=True)
    except NotImplementedError:
        # A temporary bound is set.  Eventually, it will be removed.
        # The currently added test case takes large time
        # even with n=5, and much longer with large n's.
        n = 5

    h, B = param_poly_rischDE(a, b, r, n, DE)

    # h = [h1, ..., hv] in k[t]^v and B is a matrix with u + v
    # columns and entries in Const(k) such that
    # a*Dp + b*p = Sum(dj*rj) has a solution p of degree <= n
    # in k[t] with d1, ..., du in Const(k) if and only if
    # p = Sum(ek*hk) where e1, ..., ev are in Const(k) and
    # B*Matrix([d1, ..., du, e1, ..., ev]) == 0.
    # The solutions of the original equation for ci = Sum(dj*aji)
    # (i = 1, ..., m) are then y = Sum(ek*hk, (k, 1, v))/gamma.

    ## Build combined relation matrix with m + u + v columns.

    A = -eye(m)
    for vj in V:
        A = A.row_join(vj)
    A = A.row_join(zeros(m, len(h)))
    A = A.col_join(zeros(B.rows, m).row_join(B))

    ## Eliminate d1, ..., du.

    W = A.nullspace()

    # W = [w1, ..., wt] where each wl is a column matrix with
    # entries blk (k = 1, ..., m + u + v) in Const(k).
    # The vectors (bl1, ..., blm) generate the space of those
    # constant families (c1, ..., cm) for which a solution of
    # the equation Dy + f*y == Sum(ci*gi) exists.  They generate
    # the space and form a basis except possibly when Dy + f*y == 0
    # is solvable in k(t).  The corresponding solutions are
    # y = Sum(blk'*hk, (k, 1, v))/gamma, where k' = k + m + u.

    v = len(h)
    M = Matrix([wl[:m] + wl[-v:] for wl in W])  # excise dj's.
    N = M.nullspace()

    # N = [n1, ..., ns] where the ni in Const(k)^(m + v) are column
    # vectors generating the space of linear relations between
    # c1, ..., cm, e1, ..., ev.

    C = Matrix([ni[:] for ni in N])  # rows n1, ..., ns.

    return [hk.cancel(gamma, include=True) for hk in h], C


def limited_integrate_reduce(fa, fd, G, DE):
    """
    Simpler version of step 1 & 2 for the limited integration problem.

    Given a derivation D on k(t) and f, g1, ..., gn in k(t), return
    (a, b, h, N, g, V) such that a, b, h in k[t], N is a non-negative integer,
    g in k(t), V == [v1, ..., vm] in k(t)^m, and for any solution v in k(t),
    c1, ..., cm in C of f == Dv + Sum(ci*wi, (i, 1, m)), p = v*h is in k<t>,
    and p and the ci satisfy a*Dp + b*p == g + Sum(ci*vi, (i, 1, m)).
    Furthermore, if S1irr == Sirr, then p is in k[t], and if t is nonlinear or
    Liouvillian over k, then deg(p) <= N.

    So that the special part is always computed, this function calls the more
    general prde_special_denom() automatically if it cannot determine that
    S1irr == Sirr.  Furthermore, it will automatically call bound_degree() when
    t is linear and non-Liouvillian, which for the transcendental case, implies
    that Dt == a*t + b for some a, b in k*.
    """
    dn, ds = splitfactor(fd, DE)
    E = [splitfactor(gd, DE) for _, gd in G]
    En, Es = list(zip(*E))
    c = reduce(lambda i, j: i.lcm(j), (dn,) + En)  # lcm(dn, en1, ..., enm)
    hn = c.gcd(c.diff(DE.t))
    a = hn
    b = -derivation(hn, DE)
    N = 0

    # These are the cases where we know that S1irr = Sirr, but there could be
    # others, and this algorithm will need to be extended to handle them.
    if DE.case in ['base', 'primitive', 'exp', 'tan']:
        hs = reduce(lambda i, j: i.lcm(j), (ds,) + Es)  # lcm(ds, es1, ..., esm)
        a = hn*hs
        b -= (hn*derivation(hs, DE)).quo(hs)
        mu = min(order_at_oo(fa, fd, DE.t), min([order_at_oo(ga, gd, DE.t) for
            ga, gd in G]))
        # So far, all the above are also nonlinear or Liouvillian, but if this
        # changes, then this will need to be updated to call bound_degree()
        # as per the docstring of this function (DE.case == 'other_linear').
        N = hn.degree(DE.t) + hs.degree(DE.t) + max(0, 1 - DE.d.degree(DE.t) -
            mu)
    else:
        # TODO: implement this
        raise NotImplementedError

    V = [(-a*hn*ga).cancel(gd, include=True) for ga, gd in G]
    return (a, b, a, N, (a*hn*fa).cancel(fd, include=True), V)


def limited_integrate(fa, fd, G, DE):
    """
    Solves the limited integration problem:  f = Dv + Sum(ci*wi, (i, 1, n))
    """
    fa, fd = fa*Poly(1/fd.LC(), DE.t), fd.monic()
    # interpreting the limited integration problem as a
    # parametric Risch DE problem
    Fa = Poly(0, DE.t)
    Fd = Poly(1, DE.t)
    G = [(fa, fd)] + G
    h, A = param_rischDE(Fa, Fd, G, DE)
    V = A.nullspace()
    V = [v for v in V if v[0] != 0]
    if not V:
        return None
    else:
        # we can take any vector from V, we take V[0]
        c0 = V[0][0]
        # v = [-1, c1, ..., cm, d1, ..., dr]
        v = V[0]/(-c0)
        r = len(h)
        m = len(v) - r - 1
        C = list(v[1: m + 1])
        y = -sum([v[m + 1 + i]*h[i][0].as_expr()/h[i][1].as_expr()
                for i in range(r)])
        y_num, y_den = y.as_numer_denom()
        Ya, Yd = Poly(y_num, DE.t), Poly(y_den, DE.t)
        Y = Ya*Poly(1/Yd.LC(), DE.t), Yd.monic()
        return Y, C
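

# Illustrative call (assumed base extension D = d/dx on QQ(x)): to decide
# whether f = 1/x can be written as Dv + c1*(1/x), one would use
#
#     DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
#     limited_integrate(Poly(1, x), Poly(x, x),
#         [(Poly(1, x), Poly(x, x))], DE)
#
# Since v = 0 and c1 = 1 solve the problem, a result of the form
# ((Ya, Yd), [1]) with Ya/Yd == 0 is expected; None would signal that no
# such v and ci exist.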


def parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None):
    """
    Parametric logarithmic derivative heuristic.

    Given a derivation D on k[t], f in k(t), and a hyperexponential monomial
    theta over k(t), raises either NotImplementedError, in which case the
    heuristic failed, or returns None, in which case it has proven that no
    solution exists, or returns a solution (n, m, v) of the equation
    n*f == Dv/v + m*Dtheta/theta, with v in k(t)* and n, m in ZZ with n != 0.

    If this heuristic fails, the structure theorem approach will need to be
    used.

    The argument w == Dtheta/theta
    """
    # TODO: finish writing this and write tests
    c1 = c1 or Dummy('c1')

    p, a = fa.div(fd)
    q, b = wa.div(wd)

    B = max(0, derivation(DE.t, DE).degree(DE.t) - 1)
    C = max(p.degree(DE.t), q.degree(DE.t))

    if q.degree(DE.t) > B:
        eqs = [p.nth(i) - c1*q.nth(i) for i in range(B + 1, C + 1)]
        s = solve(eqs, c1)
        if not s or not s[c1].is_Rational:
            # deg(q) > B, no solution for c.
            return None

        M, N = s[c1].as_numer_denom()

        nfmwa = N*fa*wd - M*wa*fd
        nfmwd = fd*wd
        Qv = is_log_deriv_k_t_radical_in_field(N*fa*wd - M*wa*fd, fd*wd, DE,
            'auto')
        if Qv is None:
            # (N*f - M*w) is not the logarithmic derivative of a k(t)-radical.
            return None

        Q, v = Qv

        if Q.is_zero or v.is_zero:
            return None

        return (Q*N, Q*M, v)

    if p.degree(DE.t) > B:
        return None

    c = lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC())
    l = fd.monic().lcm(wd.monic())*Poly(c, DE.t)
    ln, ls = splitfactor(l, DE)
    z = ls*ln.gcd(ln.diff(DE.t))

    if not z.has(DE.t):
        # TODO: We treat this as 'no solution', until the structure
        # theorem version of parametric_log_deriv is implemented.
        return None

    u1, r1 = (fa*l.quo(fd)).div(z)  # (l*f).div(z)
    u2, r2 = (wa*l.quo(wd)).div(z)  # (l*w).div(z)

    eqs = [r1.nth(i) - c1*r2.nth(i) for i in range(z.degree(DE.t))]
    s = solve(eqs, c1)
    if not s or not s[c1].is_Rational:
        # deg(q) <= B, no solution for c.
        return None

    M, N = s[c1].as_numer_denom()

    nfmwa = N*fa*wd - M*wa*fd
    nfmwd = fd*wd
    Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE)
    if Qv is None:
        # (N*f - M*w) is not the logarithmic derivative of a k(t)-radical.
        return None

    Q, v = Qv

    if Q.is_zero or v.is_zero:
        return None

    return (Q*N, Q*M, v)


def parametric_log_deriv(fa, fd, wa, wd, DE):
    # TODO: Write the full algorithm using the structure theorems.
#    try:
    A = parametric_log_deriv_heu(fa, fd, wa, wd, DE)
#    except NotImplementedError:
#        # Heuristic failed, we have to use the full method.
#        # TODO: This could be implemented more efficiently.
#        # It isn't too worrisome, because the heuristic handles most difficult
#        # cases.
    return A


def is_deriv_k(fa, fd, DE):
    r"""
    Checks if Df/f is the derivative of an element of k(t).

    a in k(t) is the derivative of an element of k(t) if there exists b in
    k(t) such that a = Db.  Either returns (ans, u), such that Df/f == Du, or
    None, which means that Df/f is not the derivative of an element of k(t).
    ans is a list of tuples such that Add(*[i*j for i, j in ans]) == u.  This
    is useful for seeing exactly what elements of k(t) produce u.

    This function uses the structure theorem approach, which says that for any
    f in K, Df/f is the derivative of an element of K if and only if there are
    ri in QQ such that::

            ---               ---       Dt
            \    r  * Dt   +  \    r  *   i      Df
            /     i     i     /     i   ---   =  --.
            ---               ---        t        f
         i in L            i in E         i
               K/C(x)            K/C(x)


    Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
    transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some
    a_i in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of
    logarithmic monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n}
    such that t_i is transcendental over C(x)(t_1, ..., t_i-1) and
    Dt_i/t_i = Da_i, for some a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of
    all indices of hyperexponential monomials of K over C(x)).  If K is an
    elementary extension over C(x), then the cardinality of
    L_K/C(x) U E_K/C(x) is exactly the transcendence degree of K over C(x).

    The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
    recursively using this same function.  Therefore, it is required to pass
    them as indices to D (or T).  E_args are the arguments of the
    hyperexponentials indexed by E_K (i.e., if i is in E_K, then
    T[i] == exp(E_args[i])).  This is needed to compute the final answer u
    such that Df/f == Du.

    log(f) will be the same as u up to an additive constant.  This is because
    they will both behave the same as monomials.  For example, both log(x) and
    log(2*x) == log(x) + log(2) satisfy Dt == 1/x, because log(2) is constant.
    Therefore, the term const is returned.  const is such that
    log(const) + f == u.  This is calculated by subtracting the arguments of
    one logarithm from the other.  Therefore, it is necessary to pass the
    arguments of the logarithmic terms in L_args.

    To handle the case where we are given Df/f, not f, use
    is_deriv_k_in_field().

    See also
    ========

    is_log_deriv_k_t_radical_in_field, is_log_deriv_k_t_radical

    """
    # Compute Df/f
    dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)), fd*fa
    dfa, dfd = dfa.cancel(dfd, include=True)

    # Our assumption here is that each monomial is recursively transcendental
    if len(DE.exts) != len(DE.D):
        if [i for i in DE.cases if i == 'tan'] or \
                (set([i for i in DE.cases if i == 'primitive']) -
                    set(DE.indices('log'))):
            raise NotImplementedError("Real version of the structure "
                "theorems with hypertangent support is not yet implemented.")

        # TODO: What should really be done in this case?
        raise NotImplementedError("Nonelementary extensions not supported "
            "in the structure theorems.")

    E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr()
              for i in DE.indices('exp')]
    L_part = [DE.D[i].as_expr() for i in DE.indices('log')]

    lhs = Matrix([E_part + L_part])
    rhs = Matrix([dfa.as_expr()/dfd.as_expr()])

    A, u = constant_system(lhs, rhs, DE)

    if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
        # If the elements of u are not all constant
        # Note: See comment in constant_system

        # Also note: derivation(basic=True) calls cancel()
        return None
    else:
        if not all(i.is_Rational for i in u):
            raise NotImplementedError("Cannot work with non-rational "
                "coefficients in this case.")
        else:
            terms = ([DE.extargs[i] for i in DE.indices('exp')] +
                    [DE.T[i] for i in DE.indices('log')])
            ans = list(zip(terms, u))
            result = Add(*[Mul(i, j) for i, j in ans])
            argterms = ([DE.T[i] for i in DE.indices('exp')] +
                    [DE.extargs[i] for i in DE.indices('log')])
            l = []
            ld = []
            for i, j in zip(argterms, u):
                # We need to get around things like sqrt(x**2) != x
                # and also sqrt(x**2 + 2*x + 1) != x + 1
                # Issue 10798: i need not be a polynomial
                i, d = i.as_numer_denom()
                icoeff, iterms = sqf_list(i)
                l.append(Mul(*([Pow(icoeff, j)] +
                    [Pow(b, e*j) for b, e in iterms])))
                dcoeff, dterms = sqf_list(d)
                ld.append(Mul(*([Pow(dcoeff, j)] +
                    [Pow(b, e*j) for b, e in dterms])))
            const = cancel(fa.as_expr()/fd.as_expr()/Mul(*l)*Mul(*ld))

            return (ans, result, const)
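

# Conceptual sketch (illustrative, not from the original module): in
# K = QQ(x, t1) with t1 = log(x), take f = x.  Then Df/f = 1/x = Dt1, so
# is_deriv_k should return ans = [(log(x), 1)], u = log(x) and const = 1,
# since log(f) == log(x) == u exactly (no extra additive constant).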


def is_log_deriv_k_t_radical(fa, fd, DE, Df=True):
    r"""
    Checks if Df is the logarithmic derivative of a k(t)-radical.

    b in k(t) can be written as the logarithmic derivative of a k(t) radical
    if there exist n in ZZ and u in k(t) with n, u != 0 such that
    n*b == Du/u.  Either returns (ans, u, n, const) or None, which means that
    Df cannot be written as the logarithmic derivative of a k(t)-radical.
    ans is a list of tuples such that Mul(*[i**j for i, j in ans]) == u.
    This is useful for seeing exactly what elements of k(t) produce u.

    This function uses the structure theorem approach, which says that for any
    f in K, Df is the logarithmic derivative of a K-radical if and only if
    there are ri in QQ such that::

            ---               ---       Dt
            \    r  * Dt   +  \    r  *   i
            /     i     i     /     i   ---   =  Df.
            ---               ---        t
         i in L            i in E         i
               K/C(x)            K/C(x)


    Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
    transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some
    a_i in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of
    logarithmic monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n}
    such that t_i is transcendental over C(x)(t_1, ..., t_i-1) and
    Dt_i/t_i = Da_i, for some a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set
    of all indices of hyperexponential monomials of K over C(x)).  If K is an
    elementary extension over C(x), then the cardinality of
    L_K/C(x) U E_K/C(x) is exactly the transcendence degree of K over C(x).
    Furthermore, because Const_D(K) == Const_D(C(x)) == C, deg(Dt_i) == 1 when
    t_i is in E_K/C(x) and deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in
    particular that E_K/C(x) and L_K/C(x) are disjoint.

    The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
    recursively using this same function.  Therefore, it is required to pass
    them as indices to D (or T).  L_args are the arguments of the logarithms
    indexed by L_K (i.e., if i is in L_K, then T[i] == log(L_args[i])).  This
    is needed to compute the final answer u such that n*f == Du/u.

    exp(f) will be the same as u up to a multiplicative constant.  This is
    because they will both behave the same as monomials.  For example, both
    exp(x) and exp(x + 1) == E*exp(x) satisfy Dt == t.  Therefore, the term
    const is returned.  const is such that exp(const)*f == u.  This is
    calculated by subtracting the arguments of one exponential from the other.
    Therefore, it is necessary to pass the arguments of the exponential terms
    in E_args.

    To handle the case where we are given Df, not f, use
    is_log_deriv_k_t_radical_in_field().

    See also
    ========

    is_log_deriv_k_t_radical_in_field, is_deriv_k

    """
    H = []
    if Df:
        dfa, dfd = (fd*derivation(fa, DE) -
            fa*derivation(fd, DE)).cancel(fd**2, include=True)
    else:
        dfa, dfd = fa, fd

    # Our assumption here is that each monomial is recursively transcendental
    if len(DE.exts) != len(DE.D):
        if [i for i in DE.cases if i == 'tan'] or \
                (set([i for i in DE.cases if i == 'primitive']) -
                    set(DE.indices('log'))):
            raise NotImplementedError("Real version of the structure "
                "theorems with hypertangent support is not yet implemented.")

        # TODO: What should really be done in this case?
        raise NotImplementedError("Nonelementary extensions not supported "
            "in the structure theorems.")

    E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr()
              for i in DE.indices('exp')]
    L_part = [DE.D[i].as_expr() for i in DE.indices('log')]

    lhs = Matrix([E_part + L_part])
    rhs = Matrix([dfa.as_expr()/dfd.as_expr()])

    A, u = constant_system(lhs, rhs, DE)

    if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
        # If the elements of u are not all constant
        # Note: See comment in constant_system

        # Also note: derivation(basic=True) calls cancel()
        return None
    else:
        if not all(i.is_Rational for i in u):
            # TODO: But maybe we can tell if they're not rational, like
            # log(2)/log(3).  Also, there should be an option to continue
            # anyway, even if the result might potentially be wrong.
            raise NotImplementedError("Cannot work with non-rational "
                "coefficients in this case.")
        else:
            n = reduce(ilcm, [i.as_numer_denom()[1] for i in u])
            u *= n
            terms = ([DE.T[i] for i in DE.indices('exp')] +
                    [DE.extargs[i] for i in DE.indices('log')])
            ans = list(zip(terms, u))
            result = Mul(*[Pow(i, j) for i, j in ans])

            # exp(f) will be the same as result up to a multiplicative
            # constant.  We now find the log of that constant.
            argterms = ([DE.extargs[i] for i in DE.indices('exp')] +
                    [DE.T[i] for i in DE.indices('log')])
            const = cancel(fa.as_expr()/fd.as_expr() -
                Add(*[Mul(i, j/n) for i, j in zip(argterms, u)]))

            return (ans, result, n, const)
f = [Poly(fa.as_expr()/fd.as_expr(), t, field=True) for fa,", "it has # to be Q + Fi taking its", "in k[t] with deg(b) < deg(D) - 1 and either", "Sum(ci*gi) may have a polynomial solution # only if the", "appear in his integrand in the variables other than the", "and G = [G1, ..., Gm] in k(t)^m, return h", "is the logarithmic derivative of a k(t)-radical. b in k(t)", "v = Qv if Q.is_zero or v.is_zero: return None return", "= [v1, ..., vu] where each vj is a column", "1)/(N*DE.d.LC()) sitn = Poly(si*DE.t**N, DE.t) H[i] = H[i] + sitn", "--- --- t f i in L i in E", "TODO: finish writing this and write tests c1 = c1", "fails, the structure theorem approach will need to be used.", "in any # algorithm that uses rref()). # # We", "not all(len(j) == i.degree() and all(k.is_Rational for k in j)", "wa, wd, DE): # TODO: Write the full algorithm using", "tuple (B, v, s), where B is a Matrix with", "= list(zip(*roots)) or [[], []] # Note: this might be", "there exist y in K(t) and c1, ..., cm in", "in Const(k) and q in k[t] satisfy deg(q) <= n", "= (wa*l.quo(wd)).div(z) # (l*w).div(z) eqs = [r1.nth(i) - c1*r2.nth(i) for", "\" \"not yet implemented.\") else: # Liouvillian cases if DE.case", "or returns None, in which case it has proven that", "..., ev. C = Matrix([ni[:] for ni in N]) #", "Const(k). # Sum(ci*gi) is in k[t] for c1, ..., cm", "real_imag(ba, bd*a, DE.t) betad = alphad etaa, etad = frac_in(dcoeff,", "[r1, ..., ru] # Solutions of a*Dp + b*p =", "_ in H] if not all(len(j) == i.degree() and all(k.is_Rational", "(k, 1, v))/gamma, where k' = k + m +", "sympy.polys import Poly, lcm, cancel, sqf_list from sympy.polys.polymatrix import PolyMatrix", "except NotImplementedError: # A temporary bound is set. Eventually, it", "= b + derivation(a, DE) Qq = [zi - derivation(ri,", "field=True) with DecrementLevel(DE): Qy = [frac_in(q.nth(i), DE.t, field=True) for q", "are the cases where we know that S1irr = Sirr,", "written as the logarithmic derivative of a k(t)-radical. ans is", "s = solve(eqs, c1) if not s or not s[c1].is_Rational:", "..., dr]]).T == 0. \"\"\" db = b.degree(DE.t) m =", "field=True) for q in Q] fi, Ai = param_rischDE(ba, bd,", "(qs, M) def poly_linear_constraints(p, d): \"\"\" Given p = [p1,", "deg(b) < deg(D) - 1 and either D == d/dt", "# divisible by d if and only if M*Matrix([f1, ...,", "min(n, m) elif case == 'tan': dcoeff = DE.d.quo(Poly(DE.t**2 +", "solution in the given field not in some (possibly unspecified", "include=True) with DecrementLevel(DE): pa, pd = frac_in(p, DE.t, cancel=True) wa,", "j in range(A.cols): for i in range(A.rows): if A[i, j].has(*DE.T):", "if it were [[1, 1]]) residueterms = [(H[j][1].subs(z, i), i)", "DE.indices('exp')] + [DE.extargs[i] for i in DE.indices('log')]) l = []", "= z/q of the original equation. gamma = q G", "b, G, h) such that a, h in k[t], b", "in k(t) such that a = Db. 
Either returns (ans,", "raise NotImplementedError(\"The hypertangent case is \" \"not yet implemented for", "etaa, etad, DE) if A is not None: Q, m,", "= c.gcd(c.diff(DE.t)) a = hn b = -derivation(hn, DE) N", "in range(ri): hji = fi[j]*DE.t**i hi[j] = hji # building", "is calculated by subtracting the arguments of one exponential from", "sympy.matrices import zeros, eye from sympy.polys import Poly, lcm, cancel,", "b*p = Sum(ci*gi) may have a polynomial solution # only", "= [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')] L_part = [DE.D[i].as_expr()", "== 0 # Build combined constraint matrix with m +", "in k with c1, ..., cm in Const(k) # if", "u up to a additive constant. This is because they", "or not s[c1].is_Rational: # deg(q) <= B, no solution for", "Sum(fi*qi). M, _ = constant_system(M, zeros(M.rows, 1), DE) # M", "dr) is a solution of Ax == 0. \"\"\" m", "case the sum is equal to Sum(fi*qi). M, _ =", "real_imag(ba, bd, gen): \"\"\" Helper function, to get the real", "Add test if Q == 1: n = min(n, s/2)", "is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i,", "with DecrementLevel(DE): ba, bd = frac_in(b + i*derivation(DE.t, DE)/DE.t, DE.t,", "or not s[c1].is_Rational: # deg(q) > B, no solution for", "problems, # because case != 'base'. alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0),", "1, m)), # where ei == ci (i = 1,", "..., cm in Const(k) # if and only if M*Matrix([c1,", "enm) hn = c.gcd(c.diff(DE.t)) a = hn b = -derivation(hn,", "that for any f in K, Df is the logarithmic", "with Dt/t in k (resp. Dt/(t**2 + 1) in k,", "deg(D) >= 2, returns h1, ..., hr in k[t] and", "which case the solutions are # y = d1*f1 for", "k[t], b in k<t>, and g1, ..., gm in k(t)", "else 0 for key, value in ba.items()] ba_real = sum(r", "an integral is nonelementary (such as # risch_integrate(exp((sin(x)**2 + cos(x)**2", "a k(t)-radical. return None Q, v = Qv if Q.is_zero", "m = len(Q) H = [Poly(0, DE.t)]*m for N in", "= V[0]/(-c0) r = len(h) m = len(v) - r", "in DE.indices('exp')] + [DE.extargs[i] for i in DE.indices('log')]) l =", "etad, DE) if A is not None: Q, m, z", "def is_deriv_k(fa, fd, DE): r\"\"\" Checks if Df/f is the", "for qi in q]): return [], zeros(1, m) # No", "in k<t>, and g1, ..., gm in k(t) with Dt/t", "L_args are the arguments of the logarithms indexed by L_K", "r)) where d1, ..., dr are in Const(k) and (c1,", "constant_system(M, zeros(M.rows, 1), DE) # M is a matrix with", "eqs = [r1.nth(i) - c1*r2.nth(i) for i in range(z.degree(DE.t))] s", "A, and a vector (Matrix) u with coefficients in K,", "cm in C of f == Dv + Sum(ci*wi, (i,", "= Rm1.applyfunc(cancel) um1 = cancel(derivation(u[i], DE, basic=True)/ derivation(A[i, j], DE,", "ei == ci (i = 1, ..., m), when #", "pd = frac_in(p, DE.t, cancel=True) wa, wd = frac_in((wa, wd),", "well with Matrix yet, this algorithm assumes that all matrix", "entirely by being careful with the sorts of expressions that", "unspecified extension) and \"in_field\" with the function name is used", "i, j: i.lcm(j), Gds, Poly(1, DE.t)) en, es = splitfactor(gd,", "where each wl is a column matrix with # entries", "for j in range(len(H)) for i in residues[j]] # TODO:", "monomials. For example, both log(x) and log(2*x) == log(x) +", "+ H, A.col_join(B).col_join(C) def prde_cancel_liouvillian(b, Q, n, DE): \"\"\" Pg,", "only if # A*Matrix([c1, ...,cm]) == 0. V = A.nullspace()", "DecrementLevel(DE): # We are guaranteed to not have problems, #", "..., u) in k[t]. 
if not V: # No non-trivial", "t_i is in E_K/C(x) and deg(Dt_i) == 0 when t_i", "key[0] % 4 == 1 else -value if key[0] %", "# c1, ..., cm, e1, ..., ev. C = Matrix([ni[:]", "f + H, A.col_join(B).col_join(C) def prde_cancel_liouvillian(b, Q, n, DE): \"\"\"", "assumption here is that each monomial is recursively transcendental if", "both behave the same as monomials. For example, both exp(x)", "Poly(1, DE.t) G = [(fa, fd)] + G h, A", "return None else: # we can take any vector from", "for i in DE.indices('log')]) const = cancel(fa.as_expr()/fd.as_expr() - Add(*[Mul(i, j/n)", "A.row_join(zeros(m, len(g))) A = A.col_join(zeros(B.rows, m).row_join(B)) return h, A def", "Coefficients of t^j (j > 0) in Sum(ci*qi) must be", "cardinality of L_K/C(x) U E_K/C(x) is exactly the transcendence degree", "case == 'tan': dcoeff = DE.d.quo(Poly(DE.t**2 + 1, DE.t)) with", "and n > -b.as_poly().LC()/DE.d.as_poly().LC()): raise NotImplementedError(\"prde_no_cancel_b_equal() is \" \"not yet", "of the equation (i.e., qi in k[t]). See the docstring", "field=True) Q0 = [frac_in(qi.TC(), t0, field=True) for qi in Q]", "N = s[c1].as_numer_denom() nfmwa = N.as_poly(DE.t)*fa*wd - M.as_poly(DE.t)*wa*fd nfmwd =", "the hyperexponentials indexed by E_K (i.e., if i is in", "log(L_args[i])). This is needed to compute the final answer u", "a derivation D on k(t) and f, g1, ..., gn", "..., gm in k(t) with f weakly normalized with respect", "and only if # Sum(ci*qi) == 0 in which case", "and in determining if an element a of K is", "of the denominator. Given a derivation D on k[t] and", "and log(2*x) == log(x) + log(2) satisfy Dt == 1/x,", "efficiently. # It isn't too worrisome, because the heuristic handles", "p]) if not all([ri.is_zero for ri in r]): n =", "u), such that Df/f == Du, or None, which means", "A = A.row_join(zeros(m, len(g))) A = A.col_join(zeros(B.rows, m).row_join(B)) return h,", "with DecrementLevel(DE): pa, pd = frac_in(p, DE.t, cancel=True) wa, wd", "0 such that n*f == Du/u. Either returns (n, u)", "either v has coefficients in C, in which case s", "of the original equation are # y = Sum(dj*fj, (j,", "same as if it were [[1, 1]]) residueterms = [(H[j][1].subs(z,", "..., ns. return [hk.cancel(gamma, include=True) for hk in h], C", "hyperexponential monomials of K over C(x)). If K is an", "k in j) for i, j in roots): # If", "coefficient # and terms for the recovery of original solutions.", "c1) if not s or not s[c1].is_Rational: # deg(q) >", "residues[j]] # TODO: finish writing this and write tests p", "+ 1) == E*exp(x) satisfy Dt == t. Therefore, the", "# we can take any vector from V, we take", "0 and gcd(a, b) == 1, return (A, B, Q,", "DE): \"\"\" Generate a system for the constant solutions. Given", "in DE.indices('exp')] + [DE.extargs[i] for i in DE.indices('log')]) ans =", "<= n in k[t] with c1, ..., cm in Const(k)", "'primitive' or DE.case == 'exp': return prde_cancel_liouvillian(b, q, n, DE)", "this has # the minimum number of rows. Mqq =", "Dg will be in k[t] if f is the logarithmic", "1)): return prde_no_cancel_b_large(b, q, n, DE) elif ((b.is_zero or b.degree()", "in E_args. To handle the case where we are given", "is zero # for c1, ..., cm in Const(k) if", "n in ZZ and u in k(t) with n, u", "gcd(a, b) == 1, return (A, B, Q, R, n1),", "(j, 1, r) + Sum(ei*hi, (i, 1, m)), # where", "\"\"\"Polynomial solutions of a parametric Risch differential equation. Given a", "solution y0 in k with c1, ..., cm in Const(k)", "Sum(ci*Gi, (i, 1, m)). 
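For a concrete feel for the denominator reduction, here is a minimal sketch that mirrors the call conventions of SymPy's own test suite (the particular polynomials are illustrative, not taken from this module): it normalizes the denominators for f = 1/x and two right-hand sides over a hypertangent extension with Dt = 1 + t**2:

    from sympy import Poly
    from sympy.abc import x, t
    from sympy.integrals.risch import DifferentialExtension
    from sympy.integrals.prde import prde_normal_denom

    # Extension QQ(x)(t) with Dx = 1 and Dt = 1 + t**2 (t behaves like tan(x)).
    DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2, t)]})

    fa, fd = Poly(1, t), Poly(x, t)            # f = 1/x, weakly normalized w.r.t. t
    G = [(Poly(t, t), Poly(1 + t**2, t)),      # g1 = t/(1 + t**2)
         (Poly(1, t), Poly(x + x*t**2, t))]    # g2 = 1/(x*(1 + t**2))

    # a and h are polynomials, b and the new right-hand sides come back as
    # (numerator, denominator) pairs; q = y*h turns Dy + f*y = Sum(ci*gi)
    # into a*Dq + b*q = Sum(ci*Gi) with the normal denominator cleared.
    a, b, Gnew, h = prde_normal_denom(fa, fd, G, DE)
    print(a, b, Gnew, h)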
prde_linear_constraints(a, b, G, DE) generates linear constraints on the constants. Given a derivation D on k[t], a, b in k[t] with gcd(a, b) == 1, and G = [g1, ..., gm] in k(t)^m, it returns Q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k(t) such that for any solution c1, ..., cm in Const(k) and p in k[t] of a*Dp + b*p == Sum(ci*gi, (i, 1, m)), (c1, ..., cm) is a solution of Mx == 0, and p and the ci satisfy a*Dp + b*p == Sum(ci*qi, (i, 1, m)). Here qi is the polynomial component of the partial fraction expansion of gi; because M has entries in k(t), and because Matrix does not play well with Poly, M is a Matrix of Basic expressions.

poly_linear_constraints(p, d): given p = [p1, ..., pm] in k[t]^m and d in k[t], return q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k such that Sum(ci*pi, (i, 1, m)), for c1, ..., cm in k, is divisible by d if and only if (c1, ..., cm) is a solution of Mx == 0, in which case the quotient is Sum(ci*qi, (i, 1, m)).

constant_system(A, u, DE) generates a system for the constant solutions. Given a differential field (K, D) with constant field C = Const(K), a Matrix A and a vector u with coefficients in K, it rewrites Ax == u as an equivalent system whose coefficients lie in C, so that the solutions of the new system in C are exactly the constant solutions of the original one (or so that the absence of constant solutions becomes apparent). This routine is used both in solving parametric problems and in deciding, via the structure theorems, whether an element of K is the derivative of an element of K or the logarithmic derivative of a K-radical. Because Poly does not play well with Matrix yet, the algorithm assumes that all matrix entries are Basic expressions.

prde_spde(a, b, Q, n, DE) is the parametric version of the Special Polynomial Differential Equation algorithm. Given a derivation D on k[t], an integer n, and a, b, q1, ..., qm in k[t] with deg(a) > 0 and gcd(a, b) == 1, it returns (A, B, Qq, R, n1), with Qq = [q1, ..., qm] and R = [r1, ..., rm], such that for any solution c1, ..., cm in Const(k) and q in k[t] of degree at most n of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), p = (q - Sum(ci*ri, (i, 1, m)))/a has degree at most n1 and satisfies A*Dp + B*p == Sum(ci*qi, (i, 1, m)).

prde_no_cancel_b_large(b, Q, n, DE) handles the no-cancellation case with deg(b) large enough. Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with b != 0 and either D == d/dt or deg(b) > max(0, deg(D) - 1), it returns h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where d1, ..., dr are in Const(k) and (c1, ..., cm, d1, ..., dr) is a solution of Ax == 0. prde_no_cancel_b_small(b, Q, n, DE) is the companion case with deg(b) small enough (deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2) and returns data of the same shape. prde_cancel_liouvillian(b, Q, n, DE) covers the cancellation case for primitive and hyperexponential monomials, following Bronstein, page 237; the borderline case deg(b) == deg(D) - 1 (prde_no_cancel_b_equal) and the non-linear and hypertangent cancellation cases have not yet been implemented and raise NotImplementedError when they are reached.
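A minimal sketch of the no-cancellation branch in the base case (the only derivation is d/dx); the inputs are made up for illustration and simply have the shapes described above:

    from sympy import Poly
    from sympy.abc import x
    from sympy.integrals.risch import DifferentialExtension
    from sympy.integrals.prde import prde_no_cancel_b_large

    # Base case: D == d/dx, so the "D == d/dt" condition of the large-degree
    # branch holds for any nonzero b.
    DE = DifferentialExtension(extension={'D': [Poly(1, x)]})

    b = Poly(1, x)                      # b != 0
    Q = [Poly(x**2, x), Poly(1, x)]     # right-hand sides q1, q2
    n = 2                               # degree bound on the solution q

    # h lists the candidate polynomials h1, ..., hr; A is the matrix over
    # Const(k) whose nullspace gives the admissible (c1, c2, d1, ..., dr),
    # with q = Sum(dj*hj) then solving Dq + b*q = c1*q1 + c2*q2.
    h, A = prde_no_cancel_b_large(b, Q, n, DE)
    print(h)
    print(A)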
param_poly_rischDE(a, b, q, n, DE) computes polynomial solutions of a parametric Risch differential equation. Given a derivation D in k[t], a and b in k[t] relatively prime, and q = [q1, ..., qm] in k[t]^m, it returns h = [h1, ..., hr] in k[t]^r and a matrix A with m + r columns and entries in Const(k) such that a*Dp + b*p == Sum(ci*qi, (i, 1, m)) has a solution p of degree at most n in k[t] with c1, ..., cm in Const(k) if and only if p = Sum(dj*hj, (j, 1, r)), where d1, ..., dr are in Const(k) and (c1, ..., cm, d1, ..., dr) is a solution of Ax == 0. When a is constant it normalizes a to 1 and dispatches directly to the no-cancellation (or cancellation) routines; otherwise it iterates prde_spde for as long as possible, accumulating the coefficient and terms needed to recover solutions of the original equation, and checks at each step (via poly_linear_constraints and constant_system) that the right-hand side can still be made divisible by gcd(a, b).

param_rischDE(fa, fd, G, DE) solves the parametric Risch differential equation Dy + f*y == Sum(ci*Gi, (i, 1, m)). Given a derivation D in k(t), f in k(t), and G = [G1, ..., Gm] in k(t)^m, it returns h = [h1, ..., hr] in k(t)^r and a matrix A with m + r columns and entries in Const(k) such that Dy + f*y == Sum(ci*Gi, (i, 1, m)) has a solution y in k(t) with c1, ..., cm in Const(k) if and only if y = Sum(dj*hj, (j, 1, r)), where d1, ..., dr are in Const(k) and (c1, ..., cm, d1, ..., dr) is a solution of Ax == 0. The reduction chains weak_normalizer, prde_normal_denom, prde_special_denom and prde_linear_constraints with constant_system (which may already force some ci to vanish), bounds the degree with bound_degree where that bound is implemented (falling back to a temporary bound of n = 5 otherwise), calls param_poly_rischDE, and finally builds a combined relation matrix with m + u + v columns that eliminates the auxiliary constants introduced along the way, leaving only the relations between c1, ..., cm and the coefficients of the returned solutions.
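As a toy end-to-end call (a sketch only; it assumes a SymPy version in which param_rischDE is fully implemented, and the inputs are chosen purely for illustration), one can ask for solutions of Dy + y == c1/x + c2 over QQ(x); the returned constraints should force c1 = 0, since no rational y satisfies Dy + y = 1/x, while y = c2 stays available through the candidate solutions:

    from sympy import Poly
    from sympy.abc import x
    from sympy.integrals.risch import DifferentialExtension
    from sympy.integrals.prde import param_rischDE

    DE = DifferentialExtension(extension={'D': [Poly(1, x)]})

    # Solve Dy + y = c1*(1/x) + c2*1 for y in QQ(x) and constants c1, c2.
    fa, fd = Poly(1, x, field=True), Poly(1, x, field=True)
    G = [(Poly(1, x, field=True), Poly(x, x, field=True)),   # G1 = 1/x
         (Poly(1, x, field=True), Poly(1, x, field=True))]   # G2 = 1

    # h holds candidate solutions as (numerator, denominator) pairs and A the
    # relation matrix linking c1, c2 and the coefficients of the candidates.
    h, A = param_rischDE(fa, fd, G, DE)
    print(h)
    print(A)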
limited_integrate_reduce(fa, fd, G, DE) is a simpler version of steps 1 and 2 for the limited integration problem. Given a derivation D on k(t) and f, w1, ..., wm in k(t), it returns (a, b, h, N, g, V) such that a, b, h in k[t], N is a non-negative integer, g in k(t), V = [v1, ..., vm] in k(t)^m, and for any solution v in k(t), c1, ..., cm in Const(k) of f == Dv + Sum(ci*wi, (i, 1, m)), p = v*h is in k<t> and p and the ci satisfy a*Dp + b*p == g + Sum(ci*vi, (i, 1, m)). Furthermore, if S1irr == Sirr, then p is in k[t]; and if t is nonlinear or Liouvillian over k, then deg(p) <= N. It restricts itself to the cases where S1irr == Sirr is known to hold, so the general problem still requires the full method.

limited_integrate(fa, fd, G, DE) solves the limited integration problem: given f = fa/fd and w1, ..., wn described by G, find v in k(t) and constants c1, ..., cn in Const(k) with f == Dv + Sum(ci*wi, (i, 1, n)), if they exist. It recasts the problem as a parametric Risch differential equation (with zero left-hand side and G extended by (fa, fd)), calls param_rischDE, and reads a solution off the nullspace of the resulting relation matrix, returning a pair ((Ya, Yd), [c1, ..., cm]) describing v, or None when no solution exists.

parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None) is the parametric logarithmic derivative heuristic. Given a derivation D on k[t], f in k(t), and a hyperexponential monomial theta over k(t) with w == Dtheta/theta, it either raises NotImplementedError (meaning the heuristic failed), returns None (meaning it has proven that no solution exists), or returns a solution (n, m, v) of the equation n*f == Dv/v + m*Dtheta/theta with v in k(t)* and n, m in ZZ, n != 0. When the heuristic fails, the structure theorem approach has to be used instead; parametric_log_deriv(fa, fd, wa, wd, DE) is the entry point, and writing the full structure-theorem version is still a TODO, so for now it simply delegates to the heuristic.
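A small base-field sketch of the limited integration problem (illustrative inputs, not taken from the module): express (x + 1)/x as Dv + c1*(1/x), which should succeed with v equivalent to x and c1 = 1:

    from sympy import Poly
    from sympy.abc import x
    from sympy.integrals.risch import DifferentialExtension
    from sympy.integrals.prde import limited_integrate

    DE = DifferentialExtension(extension={'D': [Poly(1, x)]})

    fa, fd = Poly(x + 1, x), Poly(x, x)        # f = (x + 1)/x
    G = [(Poly(1, x), Poly(x, x))]             # w1 = 1/x

    # Returns ((numerator, denominator) of v, [c1, ..., cm]) or None; here
    # (x + 1)/x == D(x) + 1*(1/x), so v should come back equivalent to x
    # (up to an additive constant) with c1 == 1.
    print(limited_integrate(fa, fd, G, DE))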
is_deriv_k(fa, fd, DE) checks whether Df/f is the derivative of an element of k(t). a in k(t) is the derivative of an element of k(t) if there exists b in k(t) such that a = Db. The function either returns (ans, u, const), such that Df/f == Du, or None, which means that Df/f is not the derivative of an element of k(t); ans is a list of tuples such that Add(*[i*j for i, j in ans]) == u, which is useful for seeing exactly which elements of k(t) produce u. It uses the structure theorem approach: for any f in K, Df/f is the derivative of an element of K if and only if there are ri in QQ such that

    Sum(ri*Dti, i in L_K/C(x)) + Sum(ri*Dti/ti, i in E_K/C(x)) == Df/f,

where C = Const(K), L_K/C(x) is the set of indices i in {1, ..., n} such that t_i is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i for some a_i in C(x)(t_1, ..., t_i-1) (i.e., the indices of the logarithmic monomials of K over C(x)), and E_K/C(x) is the set of indices i such that t_i is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i for some a_i in C(x)(t_1, ..., t_i-1) (i.e., the indices of the hyperexponential monomials of K over C(x)). If K is an elementary extension over C(x), the cardinality of L_K/C(x) union E_K/C(x) is exactly the transcendence degree of K over C(x); moreover, because Const_D(K) == Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x) and L_K/C(x) are disjoint. These sets must, by their nature, be computed recursively using this same machinery, so the extension has to carry them along (in DE.exts, DE.extargs and DE.indices); the exponential and logarithm arguments are needed to assemble the final answer u. log(f) will equal u only up to an additive constant, because both behave the same as monomials (for example, log(x) and log(2*x) == log(x) + log(2) both satisfy Dt == 1/x, since log(2) is constant); the returned const, computed by dividing the arguments of one logarithm by the other, accounts for that difference. To handle the case where the derivative itself (rather than f) is given, use is_deriv_k_in_field(). The constant field must be one that cancel() can compute with, and nonelementary extensions are not supported by the structure theorems: both situations raise NotImplementedError.

is_log_deriv_k_t_radical(fa, fd, DE, Df=True) checks whether Df is the logarithmic derivative of a k(t)-radical: b in k(t) can be written as the logarithmic derivative of a k(t)-radical if there exist n in ZZ and u in k(t) with n, u != 0 such that n*b == Du/u. It either returns (ans, result, n, const) or None, where ans is a list of tuples such that Mul(*[i**j for i, j in ans]) equals the reconstructed radical, again showing exactly which elements of k(t) produce it. It relies on the analogous structure theorem (the same linear system over QQ in the Dti and Dti/ti, with Df on the right-hand side) and on constant_system, with the rational coefficients cleared to a common denominator n. exp(f) equals the result only up to a multiplicative constant, since for example exp(x) and exp(x + 1) == E*exp(x) both satisfy Dt == t; const is therefore computed by subtracting the arguments of one exponential from the other, which is why the exponential arguments must be available in the extension.
Given a derivation D", "Fi return (H, M) def param_poly_rischDE(a, b, q, n, DE):", "sqf_list from sympy.polys.polymatrix import PolyMatrix as Matrix from sympy.solvers import", "elif case == 'tan': dcoeff = DE.d.quo(Poly(DE.t**2 + 1, DE.t))", "Dt/(t**2 + 1) in k, sqrt(-1) not in k), a", "0 for key, value in bd.items()] denom_imag = [value if", "- DE.d.degree(DE.t) - mu) else: # TODO: implement this raise", "Sum(dj*fj, (j, 1, r) + Sum(ei*hi, (i, 1, m)), #", "and G = [g1, ..., gm] in k(t)^m, return Q", "s or not s[c1].is_Rational: # deg(q) > B, no solution", "== 0 when t_i is in L_K/C(x), implying in particular", "we might # incorrectly prove that an integral is nonelementary", "Qv is None: # (N*f - M*w) is not the", "k(t), and because Matrix doesn't play well with Poly, M", "DE.case == 'primitive': with DecrementLevel(DE): ba, bd = frac_in(b, DE.t,", "for Ga, Gd in G] h = pn # (a*p**N,", "TODO: Merge this with the very similar special_denom() in rde.py", "field=True) Q = [(ga*(d).quo(gd)).div(d) for ga, gd in G] if", "of the hyperexponentials indexed by E_K (i.e., if i is", "not s[c1].is_Rational: # deg(q) <= B, no solution for c.", "finish writing this and write tests p = cancel(fa.as_expr()/fd.as_expr() -", "and a, b relatively prime a, b, q, r, n", "range(B + 1, C + 1)] s = solve(eqs, c1)", "v))/gamma, where k' = k + m + u. v", "..., cm in Const(k) and p in k[t] of a*Dp", "deg(b) > max(0, deg(D) - 1), returns h1, ..., hr", "= a.gcd(b) if not d.is_ground: break # a*Dp + b*p", "= [] # Why use DecrementLevel? Below line answers that:", "b, a, N, (a*hn*fa).cancel(fd, include=True), V) def limited_integrate(fa, fd, G,", "if d > 0: M = Matrix(d, m, lambda i,", "with Matrix yet, this algorithm assumes that all matrix entries", "be the same as u up to a additive constant.", "verify, but I believe that the answer should be #", "h = [h1, ..., hr] in k[t]^r and a matrix", "the logarithmic derivative of a k(t)-radical, then all the #", "time # even with n=5, and much longer with large", "k[t]^m and a matrix M with entries in k such", "No constraints, return the empty matrix. qs, _ = list(zip(*Q))", "seeing exactly which elements of k(t) produce u. This function", "a Matrix A, and a vector (Matrix) u with coefficients", "1. The last condition is handled by cancel() above. return", "reduced to such by cancel(). Therefore, a careful user can", "Liouvillian, but if this # changes, then this will need", "k[t] if f is the logarithmic derivative of a k(t)-radical", "zeros(M.rows, 1), DE) # A is a matrix with m", "d1, ..., dr] v = V[0]/(-c0) r = len(h) m", "roots of the resultant must be rational numbers. return None", "== i.degree() and all(k.is_Rational for k in j) for i,", "Sum(ci*wi, (i, 1, n)) \"\"\" fa, fd = fa*Poly(1/fd.LC(), DE.t),", "i in range(m): si = Q[i].nth(N + db)/b.LC() sitn =", "= y.as_numer_denom() Ya, Yd = Poly(y_num, DE.t), Poly(y_den, DE.t) Y", "be done in this case? raise NotImplementedError(\"Nonelementary extensions not supported", "this same correctness problem exists in any # algorithm that", "key[0] % 4 == 0 else -value if key[0] %", "(hn*derivation(hs, DE)).quo(hs) mu = min(order_at_oo(fa, fd, DE.t), min([order_at_oo(ga, gd, DE.t)", "that # appear in his integrand in the variables other", "g1, ..., gn in k(t), return (a, b, h, N,", "that each monomial is recursively transcendental if len(DE.exts) != len(DE.D):", "# rows n1, ..., ns. 
return [hk.cancel(gamma, include=True) for hk", "= Matrix([wl[:m] + wl[-v:] for wl in W]) # excise", "c1 or Dummy('c1') p, a = fa.div(fd) q, b =", "DE)), fd*fa dfa, dfd = dfa.cancel(dfd, include=True) # Our assumption", "in the docstring of rde.py for more information. The Parametric", "the logarithmic derivative in the base case if and only", "\" \"not yet implemented for is_log_deriv_k_t_radical_in_field()\") elif case in ['other_linear',", "k, sqrt(-1) not in k), a != 0, and gcd(a,", "gcd(a, b) == 1, and G = [g1, ..., gm]", "[j for _, j in residueterms]] + [n], S(1)) residueterms", "# entries aj1, ..., ajm in Const(k). # Sum(aji*qi) is", "e, u = A u *= DE.t**e elif case ==", "and B is a matrix with # m + r", "j: Q[j].nth(i + 1)) A, _ = constant_system(M, zeros(d, 1),", "in this case.\") else: n = reduce(ilcm, [i.as_numer_denom()[1] for i", "splitfactor(fd, DE) if not s.is_one: pass z = z or", "== g + Sum(ci*vi, (i, 1, m)). Furthermore, if S1irr", "N in range(n, 0, -1): # [n, ..., 1] for", "r)) where d1, ..., dr in Const(k) and A*Matrix([[c1, ...,", "# is a polynomial if and only if M*Matrix([f1, ...,", "+ r columns and entries in Const(k) = Const(k0) #", "more information. \"\"\" from __future__ import print_function, division from sympy.core", "# M is a matrix with m columns and entries", "field C = Const(K), a Matrix A, and a vector", "matrix. A = -eye(m) for vj in V: A =", "any # algorithm that uses rref()). # # We therefore", "= dn*h c = a*h ba = a*fa - dn*derivation(h,", "B = parametric_log_deriv(betaa, betad, etaa, etad, DE) if A is", "dr) is a solution of Ax == 0. Elements of", "least for prde_spde, it will always # terminate no matter", "if Q == 1: n = min(n, s/2) N =", "Sum(ci*qi, (i, 1, m)) has a solution p of degree", "exists b in k(t) such that a = Db. Either", "or Dummy('z') H, b = residue_reduce(fa, fd, DE, z=z) if", "DE) # M is a matrix with m columns and", "ds = splitfactor(fd, DE) Gas, Gds = list(zip(*G)) gd =", "dcoeff = DE.d.quo(Poly(DE.t, DE.t)) with DecrementLevel(DE): # We are guaranteed", "in {1, ..., n} such that t_i is transcendental over", "C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i", "= poly_linear_constraints(q, d) # qq = [qq1, ..., qqm] where", "odd power terms by checking the degree of terms wrt", "constraint matrix with m + r + m columns. 
r", "has a non-constant coefficient, in which case s is False", "R = [r1, ..., rm], such that for any solution", "if there are ri in QQ such that:: --- ---", "limitation in computer algebra in general, and implicit # in", "the right hand side of the equation (i.e., gi in", "..., cm]) == 0 and # B*Matrix([c1, ..., cm, d1,", "tests c1 = c1 or Dummy('c1') p, a = fa.div(fd)", "bound_degree(a, b, r, DE, parametric=True) except NotImplementedError: # A temporary", "[(ga*(d).quo(gd)).div(d) for ga, gd in G] if not all([ri.is_zero for", "= A.row_join(zeros(A.rows, r + m)) B = B.row_join(zeros(B.rows, m)) C", "# Heuristic failed, we have to use the full method.", "in Const(k) such that if c1, ..., cm in Const(k)", "m)), p = (q - Sum(ci*ri, (i, 1, m)))/a has", "hyperexponential monomial theta over k(t), raises either NotImplementedError, in which", "l.append(Mul(*([Pow(icoeff, j)] + [Pow(b, e*j) for b, e in iterms])))", "z = ls*ln.gcd(ln.diff(DE.t)) if not z.has(DE.t): # TODO: We treat", "case the heuristic failed, or returns None, in which case", "in Const(k) and # B*Matrix([c1, ..., cm, d1, ..., dr])", "Where C = Const(K), L_K/C(x) = { i in {1,", "in k(t), f in k(t), and G = [G1, ...,", "then y = Sum(ek*hk, (k, 1, v))/gamma. ## Build combined", "in G] a, (ba, bd), G, hn = prde_normal_denom(fa, fd,", "== 'primitive': with DecrementLevel(DE): pa, pd = frac_in(p, DE.t) A", "are the same as those of # (a/d)*Dp + (b/d)*p", "-eye(m) for vj in V: A = A.row_join(vj) A =", "dcoeff = DE.d.quo(Poly(DE.t**2 + 1, DE.t)) with DecrementLevel(DE): # We", "residue_reduce_derivation, DecrementLevel, recognize_log_derivative) from sympy.matrices import zeros, eye from sympy.polys", "all(i.is_Rational for i in u): raise NotImplementedError(\"Cannot work with non-rational", "solve such problems over 'k' (not k[t]) if DE.case ==", "finds the solution in the given field not in some", "is divisible by d if and only if ci =", "a derivation D on k[t] and f, g1, ..., gm", "with a and d in k[t]. \"\"\" m = len(G)", "is required to pass them as indices to D (or", "integer n, and a, b, q1, ..., qm in k[t]", "are computable # via the cancel() function, in order to", "This algorithm is used both in solving parametric problems and", "not z.has(DE.t): # TODO: We treat this as 'no solution',", "the reduced equation recursively. # g, B = param_poly_rischDE(a.quo(d), b.quo(d),", "Differential Equations. See the outline in the docstring of rde.py", "+ Sum(Sum(dj*aji)*betai) of the initial # equation. These are equal", "Given a derivation D in k(t), f in k(t), and", "in QQ such that:: --- --- Dt \\ r *", "of a k(t)-radical. It differs from is_log_deriv_k_t_radical(fa, fd, DE, Df=False)", "equal to Sum(fi*qi). M, _ = constant_system(M, zeros(M.rows, 1), DE)", "G, DE, case='auto'): \"\"\" Parametric Risch Differential Equation - Special", "if there exists b in k(t) such that a =", "\" \"coefficients in this case.\") else: terms = ([DE.extargs[i] for", "in Const(k) if and only if # A*Matrix([c1, ...,cm]) ==", "Issue 10798: i need not be a polynomial i, d", "that S1irr = Sirr, but there could be # others,", "is returned. const is such that exp(const)*f == u. 
This", "or v.is_zero: return None return (Q*N, Q*M, v) def parametric_log_deriv(fa,", "bound_degree() when t is linear and non-Liouvillian, which for the", "\"\"\" m = len(G) Gns, Gds = list(zip(*G)) d =", "in k<t>, G = [g1, ..., gm] in k(t)^m, and", "the next loop instead of Q it has # to", "a k(t)-radical return None if p.degree(DE.t) >= max(1, DE.d.degree(DE.t)): return", "side of the equation (i.e., qi in k[t]). See the", "weakly normalized equation. gamma *= hn A, B, G, hs", "Dt == 1/x, because log(2) is constant. Therefore, the term", "the derivative of an element of k(t). a in k(t)", "# Rm+1; m = A.rows Rm1 = Ri.applyfunc(lambda x: derivation(x,", "parametric_log_deriv(alphaa, alphad, etaa, etad, DE) B = parametric_log_deriv(betaa, betad, etaa,", "such that for any solution c1, ..., cm in Const(k)", "frac_in(dcoeff, DE.t) if recognize_log_derivative(2*betaa, betad, DE): A = parametric_log_deriv(alphaa, alphad,", "parametric problems and in determining if an element a of", "# Collect solution components. h = f + [alpha*gk for", "i.lcm(j), Gds, Poly(1, DE.t)) en, es = splitfactor(gd, DE) p", "satisfy Dt == t. Therefore, the term const is returned.", "combined relation matrix with m + u + v columns.", "- Add(*[Mul(i, j/n) for i, j in zip(argterms, u)])) return", "# f had better be 0 in that case. n", "Parametric Risch Differential Equations. The methods used for solving Parametric", "derivative of a k(t)-radical, then all the # roots of", "is None: # f - Dg will be in k[t]", "+ r columns and entries in Const(k) such that a*Dp", "(ans, u, n, const) or None, which means that Df", "sorts of expressions that # appear in his integrand in", "return Y, C def parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None):", "G = [(fa, fd)] + G h, A = param_rischDE(Fa,", "Dy + f*y == Sum(ci*Gi) exists. They generate # the", "to get the real and imaginary part of a rational", "we can use more efficient residue reduction from ratint() if", "= Sum(ci*qi, (i, 1, m)) has a solution p of", "Const(K), L_K/C(x) = { i in {1, ..., n} such", "V = [v for v in V if v[0] !=", "= list(zip(*Q)) return (qs, M) def poly_linear_constraints(p, d): \"\"\" Given", "['other_linear', 'other_nonlinear']: # XXX: If these are supported by the", "__future__ import print_function, division from sympy.core import Dummy, ilcm, Add,", "XXX: If these are supported by the structure theorems, change", "not b: # I will have to verify, but I", "* Dt + \\ r * i Df / i", "from sympy.core.compatibility import reduce, range from sympy.integrals.rde import (order_at, order_at_oo,", "[(Mqq*vj)[0] for vj in V] # [r1, ..., ru] #", "if p is None: # f - Dg will be", "= [DE.D[i].as_expr() for i in DE.indices('log')] lhs = Matrix([E_part +", "cancellation. if case == 'exp': dcoeff = DE.d.quo(Poly(DE.t, DE.t)) with", "and B is a matrix with u + v #", "Sum(aji*qi) (j = 1, ..., u). # Sum(ci*gi) is in", "== 'tan': dcoeff = DE.d.quo(Poly(DE.t**2 + 1, DE.t)) with DecrementLevel(DE):", "field=True) for fa, fd in fi] ri = len(fi) if", "return (n, u) elif case == 'tan': raise NotImplementedError(\"The hypertangent", "j in ans]) == u. 
This is useful for seeing", "Y = Ya*Poly(1/Yd.LC(), DE.t), Yd.monic() return Y, C def parametric_log_deriv_heu(fa,", "A.row_join(vj) A = A.row_join(zeros(m, len(g))) A = A.col_join(zeros(B.rows, m).row_join(B)) return", "B, G, Poly(1, DE.t)) else: raise ValueError(\"case must be one", "in Const(k) and p in k[t] of a*Dp + b*p", "== d/dt or deg(b) > max(0, deg(D) - 1), returns", "DE): A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE) B =", "dr]]).T == 0. \"\"\" db = b.degree(DE.t) m = len(Q)", "== Du, or None, which means that Df/f is not", "Q[i] - derivation(sitn, DE) - b*sitn if b.degree(DE.t) > 0:", "DE) beta = [betai + alpha*ri for betai, ri in", "> max(0, deg(D) - 1), returns h1, ..., hr in", "= pn # (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N - n),", "which case s is True and the solutions in C", "written as the logarithmic derivative of a k(t) radical if", ">= 2 and b.degree() == DE.d.degree() - 1 and n", "G, DE): \"\"\" Solve a Parametric Risch Differential Equation: Dy", "DecrementLevel(DE): ba, bd = frac_in(b + i*derivation(DE.t, DE)/DE.t, DE.t, field=True)", "Furthermore, if S1irr == Sirr, then p is in k[t],", "j: r[j].nth(i)) else: M = Matrix(0, m, []) # No", "+ r + m columns. r = len(f) I =", "common_denom = reduce(ilcm, [i.as_numer_denom()[1] for i in [j for _,", "on top of p.238 (unnumbered) for j in range(ri): hji", "+ f*y == 0 # is solvable in k(t}. The", "fd, DE): r\"\"\" Checks if Df/f is the derivative of", "side of the equation (i.e., gi in k(t)), and Q", "then the cardinality of L_K/C(x) U E_K/C(x) is exactly the", "K, Df/f is the derivative of a element of K", "= [(-a*hn*ga).cancel(gd, include=True) for ga, gd in G] return (a,", "else -value if key[0] % 4 == 3 else 0", "gamma *= hs g = A.gcd(B) a, b, g =", "Df is the logarithmic derivative of a K-radical if and", "Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i,", "Ax == 0. \"\"\" m = len(q) if n <", "the logarithmic derivative of a k(t)-radical return None if p.degree(DE.t)", "+ 1, DE.t) elif case in ['primitive', 'base']: B =", "k[t] of A*Dp + B*p = Sum(ci*Gi) correspond # to", "non-rational \" \"coefficients in this case.\") else: terms = ([DE.extargs[i]", "= Matrix([dfa.as_expr()/dfd.as_expr()]) A, u = constant_system(lhs, rhs, DE) if not", "of k, # is a polynomial if and only if", "and for any solution c1, ..., cm in Const(k) and", "frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t) etaa, etad = frac_in(dcoeff, DE.t) A = parametric_log_deriv(alphaa,", "may have a polynomial solution # only if the sum", "k, # is a polynomial if and only if M*Matrix([f1,", "structure theorems should be able to completely decide these #", "where rj = Sum(aji*qi) (j = 1, ..., u) in", "means that Df/f is not the derivative of an element", "m = A.rows Rm1 = Ri.applyfunc(lambda x: derivation(x, DE, basic=True)/", "= reduce(ilcm, [i.as_numer_denom()[1] for i in [j for _, j", "# Solve the reduced equation recursively. # g, B =", "only y0 = Sum(dj*fj, (j, 1, r)) where # d1,", "\"\"\" R, Z = list(zip(*[gcdex_diophantine(b, a, qi) for qi in", "in k[t] is the polynomial component # of the partial", "'auto'}, not %s\" % case) common_denom = reduce(ilcm, [i.as_numer_denom()[1] for", "+ 1) != x + 1 # Issue 10798: i", "1)) A, _ = constant_system(M, zeros(d, 1), DE) else: #", "indexed by E_K (i.e., if i is in E_K, then", "equation are # y = Sum(dj*fj, (j, 1, r) +", "constant solutions. 
Given a differential field (K, D) with constant", "g1, ..., gm in k(t) with Dt/t in k (resp.", "b, G, DE): \"\"\" Parametric Risch Differential Equation - Generate", "1]) y = -sum([v[m + 1 + i]*h[i][0].as_expr()/h[i][1].as_expr() \\ for", "raise ValueError(\"Inexact division\") u = cancel(u**m*Mul(*[Pow(i, j) for i, j", "fd.monic().lcm(wd.monic())*Poly(c, DE.t) ln, ls = splitfactor(l, DE) z = ls*ln.gcd(ln.diff(DE.t))", "DE.indices('exp')] L_part = [DE.D[i].as_expr() for i in DE.indices('log')] lhs =", "1, m)) then q = Sum(dj*hj, (j, 1, r)) where", "if DE.case in ['base', 'primitive', 'exp', 'tan']: hs = reduce(lambda", "A*Matrix([c1, ...,cm]) == 0. V = A.nullspace() # V =", "..., cm in Const(k) if and only if p =", "fraction expansion of gi. # M is a matrix with", "the more general prde_special_denom() automatically if it cannot determine that", "\"not yet implemented.\") else: # Liouvillian cases if DE.case ==", "= Matrix([q]) # A single row. r = [(Mq*vj)[0] for", "return None M, N = s[c1].as_numer_denom() nfmwa = N*fa*wd -", "max(0, -nb) pN = p**N pn = p**-n # This", "is the logarithmic derivative of a K-radical if and only", "H = [] # Why use DecrementLevel? Below line answers", "be wrong. raise NotImplementedError(\"Cannot work with non-rational \" \"coefficients in", "treat this as 'no solution', until the structure # theorem", "most n1 and satisfies A*Dp + B*p == Sum(ci*qi, (i,", "return None c = lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC()) l = fd.monic().lcm(wd.monic())*Poly(c, DE.t)", "of k(t). a in k(t) is the derivative of an", "key, value in ba.items()] num_imag = [value if key[0] %", "+ b*q == Sum(ci*qi, (i, 1, m)) then q =", "degree at most n1 and satisfies A*Dp + B*p ==", "return [hk.cancel(gamma, include=True) for hk in h], C def limited_integrate_reduce(fa,", "= [Poly(0, DE.t)]*m for N in range(n, 0, -1): #", "constant. Therefore, the term const is returned. const is such", "In that case, solutions of # a*Dp + b*p =", "must be rational numbers. return None # [(a, i), ...],", "algorithms here G is a list of tuples of factions", "[r1, ..., ru] # Solutions of (a/d)*Dp + (b/d)*p =", "matrix entries are Basic expressions. \"\"\" if not A: return", "not all(i.is_Rational for i in u): # TODO: But maybe", "B = b + derivation(a, DE) Qq = [zi -", "splitfactor(fd, DE) E = [splitfactor(gd, DE) for _, gd in", "satisfies A*Dr + B*r == Sum(ci*ggi, (i, 1, m)). For", "max(p.degree(DE.t), q.degree(DE.t)) if q.degree(DE.t) > B: eqs = [p.nth(i) -", "Mul(*[Pow(i, j) for i, j in ans]) # exp(f) will", "case is not supported in this function.\" % case) else:", "NotImplementedError(\"The hypertangent case is \" \"not yet implemented for is_log_deriv_k_t_radical_in_field()\")", "k(t)^m, return Q = [q1, ..., qm] in k[t]^m and", "both behave the same as monomials. For example, both log(x)", "1, m, lambda i, j: Q[j][1].nth(i)) else: M = Matrix(0,", "For the algorithms here G is a list of tuples", "(Q*N, Q*M, v) def parametric_log_deriv(fa, fd, wa, wd, DE): #", "# TODO: finish writing this and write tests p =", "fa, fd in fi] ri = len(fi) if i ==", "find in that # case (it should be the same", "an option to continue # anyway, even if the result", "theorem approach. Because Poly does not play well with Matrix", "# Solutions of the original equation are # y =", "and L_K/C(x) are disjoint. 
The sets L_K/C(x) and E_K/C(x) must,", "vector (Matrix) u with coefficients in K, returns the tuple", "in Q]) M = Matrix(N + 1, m, lambda i,", "continue # anyway, even if the result might potentially be", "are given Df/f, not f, use is_deriv_k_in_field(). See also ========", "Matrix doesn't play well with Poly, M will be a", "i in DE.indices('exp')] + [DE.T[i] for i in DE.indices('log')]) ans", "i in u]) u *= n terms = ([DE.T[i] for", "k(t)-radical. It differs from is_log_deriv_k_t_radical(fa, fd, DE, Df=False) for any", "dc = -1 M = Matrix() else: dc = max([qi.degree(DE.t)", "is None: return None n, e, u = A u", "Matrix(N + 1, m, lambda i, j: q[j].nth(i)) A, _", "wa, wd = derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True) with DecrementLevel(DE): pa,", "integration problem as a # parametric Risch DE problem Fa", "[hk.cancel(gamma, include=True) for hk in h], C def limited_integrate_reduce(fa, fd,", "1), DE) # M is a matrix with m columns", "Sum(dj*fj, (j, 1, u)) + alpha*Sum(ek*gk, (k, 1, v)). #", "because case != 'base'. alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t) etaa,", "== Sum(ci*Gi, (i, 1, m)). \"\"\" dn, ds = splitfactor(fd,", "with m + r columns and entries in Const(k) such", "R = list(R) n1 = n - a.degree(DE.t) return (A,", "Solutions of (a/d)*Dp + (b/d)*p = Sum(dj*rj) correspond to #", "the structure \" \"theorems with hypertangent support is not yet", "(DE.d.degree() >= 2 and b.degree() == DE.d.degree() - 1 and", "Sum(dj*fj, (j, 1, r)) where # d1, ..., dr ar", "in k(t) with n, u != 0 such that n*b", "- b*sitn if all(qi.is_zero for qi in Q): dc =", "in Q): dc = -1 M = Matrix() else: dc", "of the Risch Algorithm is the computability of the #", "of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), p", "f = [(Mbeta*vj)[0] for vj in V] # [f1, ...,", "exp(f) will be the same as result up to a", "} (i.e., the set of all indices of logarithmic monomials", "if Df is the logarithmic derivative of a k(t)-radical. b", "m)), q == y*h in k<t> satisfies a*Dq + b*q", "None, in which case it has proven that no solution", "if cancel() cannot reduce # an identically zero expression to", "has proven that no solution exists, or returns a solution", "s = splitfactor(fd, DE) if not s.is_one: pass z =", "fa.div(fd) q, b = wa.div(wd) B = max(0, derivation(DE.t, DE).degree(DE.t)", "# M is a matrix with m columns an entries", "i in range(A.rows): if A[i, j].has(*DE.T): # This assumes that", "DE.t), include=True) with DecrementLevel(DE): pa, pd = frac_in(p, DE.t, cancel=True)", "the original equation are # y = Sum(dj*fj, (j, 1,", "such that log(const) + f == u. This is calculated", "logarithmic # derivative problem when integration elementary functions (see #", "i.lcm(j), (dn,) + En) # lcm(dn, en1, ..., enm) hn", "sqrt(-1) Separates the even and odd power terms by checking", "of tuples of factions of the terms on the right", "..., ajm in Const(k). # Sum(aji*qi) is divisible by d", "part and bd is the denominator of the rational function.", "b*y0 = Sum(ci*qi, (i, 1, m)) has # a solution", "matrix. 
qs, _ = list(zip(*Q)) return (qs, M) def poly_linear_constraints(p,", "this case.\") else: n = reduce(ilcm, [i.as_numer_denom()[1] for i in", "l = [] ld = [] for i, j in", "of all indices of hyperexponential monomials of K over C(x)).", "fd, G, DE): \"\"\" Solves the limited integration problem: f", "that a, b, h in k[t], N is a non-negative", "\"\"\" H = [] if Df: dfa, dfd = (fd*derivation(fa,", "for ga, gd in G] return (a, b, a, N,", "case takes large time # even with n=5, and much", "derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True) with DecrementLevel(DE): pa, pd = frac_in(p,", "+ b*p = Sum(dj*rj) correspond to solutions # y =", "exists, or returns a solution (n, m, v) of the", "c1, ..., cm, d1, ..., dr] v = V[0]/(-c0) r", "also ======== is_log_deriv_k_t_radical_in_field, is_deriv_k \"\"\" H = [] if Df:", "ln, ls = splitfactor(l, DE) z = ls*ln.gcd(ln.diff(DE.t)) if not", "= Sum(ci*Gi, (i, 1, m)) has a solution y in", "f in K, Df is the logarithmic derivative of a", "G] h = pn # (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N", "u. This is useful for seeing exactly which elements of", "is 'auto', it will attempt to determine the type of", "ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen), (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen)) bd =", "(A, B, Q, R, n1), with Qq = [q1, ...,", "return (A, u) def prde_spde(a, b, Q, n, DE): \"\"\"", "+ f*y = Sum(ci*Gi, (i, 1, m)) has a solution", "DE) Qq = [zi - derivation(ri, DE) for ri, zi", "quotient is Sum(ci*qi, (i, 1, m)). \"\"\" m = len(p)", "1, m)), p = (q - Sum(ci*ri, (i, 1, m)))/a", "0: for i in range(m): si = Poly(Q[i].nth(b.degree(DE.t))/b.LC(), DE.t) H[i]", "rational, like # log(2)/log(3). Also, there should be an option", "the derivative of an element of k(t) if there exists", "Differential Equations parallel those for solving Risch Differential Equations. See", "'exp': dcoeff = DE.d.quo(Poly(DE.t, DE.t)) with DecrementLevel(DE): # We are", "and E_K/C(x) must, by their nature, be computed recursively using", "in Const(k) such that Dy + f*y = Sum(ci*Gi, (i,", "u[s] = u[s] - A[s, j]*u[m+1 u.row_op(s, lambda r, jj:", "DE.t) - order_at(bd, p, DE.t) nc = min([order_at(Ga, p, DE.t)", "DE, c1=None): \"\"\" Parametric logarithmic derivative heuristic. Given a derivation", "DE) for _, gd in G] En, Es = list(zip(*E))", "# B*Matrix([c1, ..., cm, d1]) == 0 # There are", "d1. # Coefficients of t^j (j > 0) in Sum(ci*qi)", "such that a*Dp + b*p = Sum(ci*qi, (i, 1, m))", "calling some more complex simplification function (rational function # coefficients", "the variables other than the integration # variable (the structure", "not %s.\" % case) nb = order_at(ba, p, DE.t) -", "* Dt + \\ r * i / i i", "in N]) # rows n1, ..., ns. return [hk.cancel(gamma, include=True)", "%s\" % case) common_denom = reduce(ilcm, [i.as_numer_denom()[1] for i in", "DE.t, field=True) for q in Q] fi, Ai = param_rischDE(ba,", "for Ga, Gd in G]) n = min(0, nc -", "a*Dp + b*p == Sum(ci*qi, (i, 1, m)). Because M", "n, DE) else: raise NotImplementedError(\"non-linear and hypertangent \" \"cases have", "y*h in k<t> satisfies a*Dq + b*q == Sum(ci*Gi, (i,", "(i, 1, m)), where f1, ..., fm are elements of", "= [h1, ..., hr] in k[t]^r and a matrix A", "the quotient is Sum(ci*qi, (i, 1, m)). 
\"\"\" m =", "constant_system # Also note: derivation(basic=True) calls cancel() return None else:", "bd.as_poly(gen).as_dict() ba = ba.as_poly(gen).as_dict() denom_real = [value if key[0] %", "prde_spde(a, b, Q, n, DE): \"\"\" Special Polynomial Differential Equation", "(it should be the same as if it were [[1,", "such that A, B, h in k[t], GG = [gg1,", "very similar special_denom() in rde.py if case == 'auto': case", "= hn*hs b -= (hn*derivation(hs, DE)).quo(hs) mu = min(order_at_oo(fa, fd,", "Either returns (ans, u), such that Df/f == Du, or", "== Sirr, then p is in k[t], and if t", "= Sum(dj*hj, (j, 1, r)), where d1, ..., dr in", "Du/u. Either returns (n, u) or None, which means that", "hyperexponential (resp. hypertangent) case, given a derivation D on k[t]", "Fi taking its place Q = Q + Fi return", "that the answer should be # None in this case.", "0. \"\"\" db = b.degree(DE.t) m = len(Q) H =", "Es) # lcm(ds, es1, ..., esm) a = hn*hs b", "= H[i] + si Q[i] = Q[i] - derivation(si, DE)", "== 'primitive']) - set(DE.indices('log'))): raise NotImplementedError(\"Real version of the structure", "the original equation. gamma = q G = [(q*ga).cancel(gd, include=True)", "= Sum(ci*Gi) correspond # to solutions z = q/hn of", "So that the special part is always computed, this function", "1, m)), for c1, ..., cm in k, is divisible", "it returns (a, b, G, 1) in this case. \"\"\"", "in k(t) of Dy + f*y == Sum(ci*gi, (i, 1,", "in DE.indices('exp')] L_part = [DE.D[i].as_expr() for i in DE.indices('log')] lhs", "for vj in V: A = A.row_join(vj) A = A.row_join(zeros(m,", "t = DE.t if DE.case != 'base': with DecrementLevel(DE): t0", "Sirr, then p is in k[t], and if t is", "initial # equation. These are equal to alpha*p + Sum(dj*fj)", "with u + v # columns and entries in Const(k)", "g, V) such that a, b, h in k[t], N", "A*Dr + B*r == Sum(ci*ggi, (i, 1, m)). For case", "# (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N - n), ..., gm*p**(N", "G] # a*Dp + b*p = Sum(ci*gi) may have a", "this # problem entirely by being careful with the sorts", "tests p = cancel(fa.as_expr()/fd.as_expr() - residue_reduce_derivation(H, DE, z)) p =", "is that we might # incorrectly prove that an integral", "..., cm, e1, ..., ev. C = Matrix([ni[:] for ni", "over C(x)). If K is an elementary extension over C(x),", "p and the ci satisfy a*Dp + b*p == Sum(ci*qi,", "ba[1], bd) where ba[0] is real part of the numerator", "their nature, be computed recursively using this same function. Therefore,", "def prde_no_cancel_b_large(b, Q, n, DE): \"\"\" Parametric Poly Risch Differential", "where d1, ..., dr are in Const(k) and (c1, ...,", "n = min(0, nc - min(0, nb)) if not nb:", "k(t) is the derivative of an element of k(t) if", "which case the sum is Sum(ci*qi). ## Reduce number of", "Sum(ci*gi, (i, 1, m)), (c1, ..., cm) is a solution", "return q = [q1, ..., qm] in k[t]^m and a", "ba, bd = frac_in(b + i*derivation(DE.t, DE)/DE.t, DE.t, field=True) with", "4 == 2 else 0 for key, value in bd.items()]", "G] if not all([ri.is_zero for _, ri in Q]): N", "the denominator of the rational function. \"\"\" bd = bd.as_poly(gen).as_dict()", "and a in k[t], b in k<t>, and g1, ...,", "# expression in the constant field that is identically zero,", "case.\") else: terms = ([DE.extargs[i] for i in DE.indices('exp')] +", "of terms wrt mod 4. Returns a tuple (ba[0], ba[1],", "and the solutions in C of Ax == u are", "limited_integrate(fa, fd, G, DE): \"\"\" Solves the limited integration problem:", "pass the arguments of the logarithmic terms in L_args. 
To", "If this heuristic fails, the structure theorem approach will need", "one of {'exp', 'tan', 'primitive'} for the hyperexponential, hypertangent, and", "in ['base', 'primitive', 'exp', 'tan']: hs = reduce(lambda i, j:", "= cancel(fa.as_expr()/fd.as_expr() - Add(*[Mul(i, j/n) for i, j in zip(argterms,", "Dt + \\ r * i Df / i i", "k(t)-radical return None if p.degree(DE.t) >= max(1, DE.d.degree(DE.t)): return None", "\"\"\" Pg, 237. \"\"\" H = [] # Why use", "+ i]*h[i][0].as_expr()/h[i][1].as_expr() \\ for i in range(r)]) y_num, y_den =", "= Q[i] - derivation(sitn, DE) - b*sitn if all(qi.is_zero for", "derivation(x, DE, basic=True)/ derivation(A[i, j], DE, basic=True)) Rm1 = Rm1.applyfunc(cancel)", "extension) and \"in_field\" with the function name is used to", "zi in zip(R, Z)] R = list(R) n1 = n", "a list of tuples such that Add(*[i*j for i, j", "# constant families (c1, ..., cm) for which a solution", "= [(fa, fd)] + G h, A = param_rischDE(Fa, Fd,", "i in DE.indices('exp')] + [DE.extargs[i] for i in DE.indices('log')]) l", "denominator. Given a derivation D on k[t] and f, g1,", "case, implies that Dt == a*t + b with for", "A = A.row_join(vj) A = A.row_join(zeros(m, len(g))) A = A.col_join(zeros(B.rows,", "+ derivation(a, DE) Qq = [zi - derivation(ri, DE) for", "to pass the arguments of the logarithmic terms in L_args.", "result might potentially be wrong. raise NotImplementedError(\"Cannot work with non-rational", "for i, _ in H] if not all(len(j) == i.degree()", "min([order_at_oo(ga, gd, DE.t) for ga, gd in G])) # So", "Risch DE problem Fa = Poly(0, DE.t) Fd = Poly(1,", "of the original equation are then # Sum(dj*fj, (j, 1,", "n, const) or None, which means that Df cannot be", "fd # Our assumption here is that each monomial is", "K-radical if and only if there are ri in QQ", "None n, e, u = A u *= DE.t**e elif", "# Sum(fi*qi, (i, 1, m)), where f1, ..., fm are", "gm in K(t), to determine if there exist y in", "be able to completely decide these # problems in the", "[-1, c1, ..., cm, d1, ..., dr] v = V[0]/(-c0)", "..., dr] v = V[0]/(-c0) r = len(h) m =", "!= 0 such that n*b == Du/u. Either returns (ans,", "i, j: Q[j].nth(i)) A, u = constant_system(M, zeros(dc + 1,", "parametric_log_deriv(fa, fd, wa, wd, DE): # TODO: Write the full", "# y = p/gamma of the initial equation with ci", "for qi in q] if not b.is_zero and (DE.case ==", "log(x) + log(2) satisfy Dt == 1/x, because log(2) is", "= [(ga*(d).quo(gd)).div(d) for ga, gd in G] if not all([ri.is_zero", "fd in f] else: # Base case. Dy == 0", "entries in k(t) such that for any solution c1, ...,", "const) def is_log_deriv_k_t_radical_in_field(fa, fd, DE, case='auto', z=None): \"\"\" Checks if", "n), ..., gm*p**(N - n), p**-n) return (A, B, G,", "u, DE): \"\"\" Generate a system for the constant solutions.", "'primitive': with DecrementLevel(DE): pa, pd = frac_in(p, DE.t) A =", "Equations. See the outline in the docstring of rde.py for", "it were [[1, 1]]) residueterms = [(H[j][1].subs(z, i), i) for", "factions of the terms on the right hand side of", "both in solving parametric problems and in determining if an", "Polynomial Differential Equation algorithm: Parametric Version. 
Given a derivation D", "icoeff, iterms = sqf_list(i) l.append(Mul(*([Pow(icoeff, j)] + [Pow(b, e*j) for", "= [(i, i.real_roots()) for i, _ in H] if not", "t i in L i in E i K/C(x) K/C(x)", "raise NotImplementedError(\"prde_no_cancel_b_equal() is \" \"not yet implemented.\") else: # Liouvillian", "is ci = Sum(dj*aji) # (i = 1, ..., m)", "= [-1, c1, ..., cm, d1, ..., dr] v =", "else: # No constraints on the hj. A = Matrix(0,", "has # to be Q + Fi taking its place", "DE.t, field=True) with DecrementLevel(DE): Qy = [frac_in(q.nth(i), DE.t, field=True) for", "sets L_K/C(x) and E_K/C(x) must, by their nature, be computed", "Qy = [frac_in(q.nth(i), DE.t, field=True) for q in Q] fi,", "bd), G, hn = prde_normal_denom(fa, fd, G, DE) # Solutions", "Dq + b*q == Sum(ci*qi, (i, 1, m)) then q", "in fi] ri = len(fi) if i == n: M", "+ alpha*ri for betai, ri in zip(beta, r)] alpha *=", "initial equation. d = a.gcd(b) if not d.is_ground: break #", "prde_cancel_liouvillian(b, q, n, DE) else: raise NotImplementedError(\"non-linear and hypertangent \"", "Compute Df/f dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)),", "must be one of {'primitive', 'exp', 'tan', \" \"'base', 'auto'},", "weakly normalized with respect to t, return the tuple (a,", "order_at(Gd, p, DE.t) for Ga, Gd in G]) n =", "and satisfies A*Dp + B*p == Sum(ci*qi, (i, 1, m))", "# The solutions of the original equation for ci =", "= A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for gia, gid in G]", "from # calling some more complex simplification function (rational function", "between the qi. if all([qi.is_zero for qi in q]): return", "A if Q == 1: n = min(n, m) elif", "# and a, b relatively prime a, b, q, r,", "DE.t) Fd = Poly(1, DE.t) G = [(fa, fd)] +", "with constant field C = Const(K), a Matrix A, and", "of the # constant field (actually, this same correctness problem", "m) are then y = Sum(ek*hk, (k, 1, v))/gamma. ##", "or [[], []] # Note: this might be empty, but", "residue reduction from ratint() if not fd.is_sqf or fa.degree() >=", "param_poly_rischDE(a, b, q, n, DE): \"\"\"Polynomial solutions of a parametric", "if y = Sum(dj*hj, (j, 1, r)) where d1, ...,", "return None if case == 'auto': case = DE.case if", "# incorrectly prove that an integral is nonelementary (such as", "No cancellation: deg(b) small enough. Given a derivation D on", "p, DE.t) - order_at(bd, p, DE.t) nc = min([order_at(Ga, p,", "in Q] f, B = param_rischDE(ba, bd, Q0, DE) #", "r in denom_real) bd_imag = sum(r for r in denom_imag)", "y = Sum(dj*hj, (j, 1, r)) where d1, ..., dr", "([DE.T[i] for i in DE.indices('exp')] + [DE.extargs[i] for i in", "reduce(lambda i, j: i.lcm(j), Gds, Poly(1, DE.t)) en, es =", "qm] and R = [r1, ..., rm], such that for", "at most n of a*Dq + b*q == Sum(ci*gi, (i,", "-1] for j in range(A.cols): for i in range(A.rows): if", "Given a derivation D on k[t], n in ZZ, and", "range(n, -1, -1): # [n, ..., 0] for i in", "b, g, DE) # q = [q1, ..., qm] where", "m, []) # No constraints, return the empty matrix. 
qs,", "> 0 # Iterate SPDE as long as possible cumulating", "wl is a column matrix with # entries blk (k", "\"\"\" Helper function, to get the real and imaginary part", "gcd(a, t**2 + 1) == 1), return the tuple (A,", "in DE.indices('log')]) ans = list(zip(terms, u)) result = Mul(*[Pow(i, j)", "or the logarithmic derivative of a K-radical using the structure", "cm) for which a solution of # the equation Dy", "solution v in k(t), c1, ..., cm in C of", "# parametric Risch DE problem Fa = Poly(0, DE.t) Fd", "logarithmic monomials of K over C(x)), and E_K/C(x) = {", "to a multiplicative # constant. We now find the log", "+ 1, m, lambda i, j: q[j].nth(i)) A, _ =", "case is \" \"not yet implemented for is_log_deriv_k_t_radical_in_field()\") elif case", "list of tuples such that Add(*[i*j for i, j in", "frac_in(b + i*derivation(DE.t, DE)/DE.t, DE.t, field=True) with DecrementLevel(DE): Qy =", "[f1, ..., fr] in k^r and B is a matrix", "Mul, Pow, S from sympy.core.compatibility import reduce, range from sympy.integrals.rde", "have to use the full method. # TODO: This could", "(i, 1, m)), for c1, ..., cm in k, is", "option to continue # anyway, even if the result might", "1, r)) where d1, ..., dr in Const(k) and A*Matrix([[c1,", "with # entries aj1, ..., ajm in Const(k). # Sum(aji*gi)", "solution y in k(t) with c1, ..., cm in Const(k)", "These are the cases where we know that S1irr =", "in the variables other than the integration # variable (the", "\\ for i in range(r)]) y_num, y_den = y.as_numer_denom() Ya,", "'primitive']) - set(DE.indices('log'))): raise NotImplementedError(\"Real version of the structure \"", "components. h = f + [alpha*gk for gk in g]", "qi in k[t] is the polynomial component # of the", "(possibly unspecified extension) and \"in_field\" with the function name is", "that Df/f is not the derivative of an element of", "cm]) == 0, # in which case the sum is", "include=True) for Ga, Gd in G] h = pn #", "and q in k<t> of a*Dq + b*q == Sum(ci*gi,", "= min(order_at_oo(fa, fd, DE.t), min([order_at_oo(ga, gd, DE.t) for ga, gd", "t, return the tuple (a, b, G, h) such that", "TODO: What should really be done in this case? raise", "r2 = (wa*l.quo(wd)).div(z) # (l*w).div(z) eqs = [r1.nth(i) - c1*r2.nth(i)", "= -eye(m) for vj in V: A = A.row_join(vj) A", "[], returns (1, 1) # f had better be 0", "we are given Df, not f, use is_log_deriv_k_t_radical_in_field(). See also", "solving Parametric Risch Differential Equations. The methods used for solving", "# TODO: But maybe we can tell if they're not", "satisfies A*Dp + B*p == Sum(ci*qi, (i, 1, m)) \"\"\"", "= p**-n # This is 1/h A = a*pN B", "d1, ..., dr]]).T == 0. \"\"\" db = b.degree(DE.t) m", "# No non-trivial solution. return [], eye(m) # Could return", "K over C(x)). If K is an elementary extension over", "- 1)): return prde_no_cancel_b_large(b, q, n, DE) elif ((b.is_zero or", "in L_K, then T[i] == log(L_args[i])). This is needed to", "qq, M = poly_linear_constraints(q, d) # qq = [qq1, ...,", "u)])) return (ans, result, n, const) def is_log_deriv_k_t_radical_in_field(fa, fd, DE,", "== 1: n = min(n, m) elif case == 'tan':", "Qv = is_log_deriv_k_t_radical_in_field(N*fa*wd - M*wa*fd, fd*wd, DE, 'auto') if Qv", "i, j in zip(argterms, u): # We need to get", "Parametric logarithmic derivative heuristic. Given a derivation D on k[t],", "and form a basis except possibly when Dy + f*y", "rational function. \"\"\" bd = bd.as_poly(gen).as_dict() ba = ba.as_poly(gen).as_dict() denom_real", "f, use is_log_deriv_k_t_radical_in_field(). 
See also ======== is_log_deriv_k_t_radical_in_field, is_deriv_k \"\"\" H", "DecrementLevel(DE): pa, pd = frac_in(p, DE.t) A = is_log_deriv_k_t_radical_in_field(pa, pd,", "= Poly(DE.t**2 + 1, DE.t) elif case in ['primitive', 'base']:", ":] = A[s, :] - A[s, i]*A[:, m+1] Asj =", "where f1, ..., fm are elements of k, # is", "fd, DE.t), min([order_at_oo(ga, gd, DE.t) for ga, gd in G]))", "(i, 1, m)). \"\"\" m = len(p) q, r =", "k<t>, and g1, ..., gm in k(t) with Dt/t in", "cancel(u**m*Mul(*[Pow(i, j) for i, j in residueterms])) return (common_denom, u)", "not d.is_ground: break # a*Dp + b*p = Sum(ci*qi) may", "for solving Parametric Risch Differential Equations. The methods used for", "# Sum(aji*qi) is divisible by d with exact quotient Sum(aji*qqi).", "= constant_system(M, zeros(M.rows, 1), DE) # M is a matrix", "DE.d.degree(DE.t) - mu) else: # TODO: implement this raise NotImplementedError", "i == 'tan'] or \\ (set([i for i in DE.cases", "u Au = A.row_join(u) Au = Au.rref(simplify=cancel, normalize_last=False)[0] # Warning:", "Const(k) # if and only if M*Matrix([c1, ..., cm]) ==", "== [v1, ..., vm] in k(t)^m, and for any solution", "range(m): si = Q[i].nth(N + db)/b.LC() sitn = Poly(si*DE.t**N, DE.t)", "y and ci if they exist. For the algorithms here", "Q] + [S(0)]]) # The condition for solvability is #", "k(t) and f, g1, ..., gn in k(t), return (a,", "= is_log_deriv_k_t_radical_in_field(N*fa*wd - M*wa*fd, fd*wd, DE, 'auto') if Qv is", "== log(x) + log(2) satisfy Dt == 1/x, because log(2)", "can be avoided with DecrementLevel(DE): ba, bd = frac_in(b +", "b*si if all(qi.is_zero for qi in Q): dc = -1", "for i in range(r)]) y_num, y_den = y.as_numer_denom() Ya, Yd", "is_log_deriv_k_t_radical(fa, fd, DE, Df=True): r\"\"\" Checks if Df is the", "such problems over 'k' (not k[t]) if DE.case == 'primitive':", "1), DE) c = eye(m) A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c)) return", "== Du/u. exp(f) will be the same as u up", "alpha*ri for betai, ri in zip(beta, r)] alpha *= a", "interpretting limited integration problem as a # parametric Risch DE", "solutions alpha*p + Sum(Sum(dj*aji)*betai) of the initial # equation. These", "= DE.case if case == 'exp': p = Poly(DE.t, DE.t)", "for qi in Q]) M = Matrix(dc + 1, m,", "not fd.is_sqf or fa.degree() >= fd.degree(): # f is the", "- Asj*Rm1[jj])) # u[s] = u[s] - A[s, j]*u[m+1 u.row_op(s,", "r\"\"\" Checks if Df is the logarithmic derivative of a", "= len(h) m = len(v) - r - 1 C", "..., fr] in k^r and B is a matrix with", "Asj*um1)) A = A.col_join(Rm1) u = u.col_join(Matrix([um1])) return (A, u)", "# Solutions of a*Dp + b*p = Sum(dj*rj) correspond to", "= 5 h, B = param_poly_rischDE(a, b, r, n, DE)", "Ai = param_rischDE(ba, bd, Qy, DE) fi = [Poly(fa.as_expr()/fd.as_expr(), DE.t,", "eq. on top of p.238 (unnumbered) for j in range(ri):", "that t_i is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i", "..., ggm] in k(t)^m, and for any solution c1, ...,", "any given fa, fd, DE in that it finds the", "ev. C = Matrix([ni[:] for ni in N]) # rows", "1 B = Matrix([[qi.TC() for qi in Q] + [S(0)]])", "du in Const(k). # In that case, # Sum(ci*gi) =", "def is_log_deriv_k_t_radical_in_field(fa, fd, DE, case='auto', z=None): \"\"\" Checks if f", "far, all the above are also nonlinear or Liouvillian, but", "a term in the log-part of the integral # of", "# k = k0(t0) ba, bd = frac_in(b, t0, field=True)", "q.degree(DE.t) > B: eqs = [p.nth(i) - c1*q.nth(i) for i", "the structure theorem approach. 
Because Poly does not play well", "Add, Mul, Pow, S from sympy.core.compatibility import reduce, range from", "exp(const)*f == u. This is calculated by subtracting the arguments", "and entries in k. # Sum(fi*gi, (i, 1, m)), where", "[Pow(b, e*j) for b, e in iterms]))) dcoeff, dterms =", "reduced equation recursively. # g, B = param_poly_rischDE(a.quo(d), b.quo(d), r,", "cannot reduce # an identically zero expression to 0. The", "m)).col_join(c.row_join(-c)) return (H, A) def prde_no_cancel_b_small(b, Q, n, DE): \"\"\"", "for qi in q]) M = Matrix(N + 1, m,", "A[s, :] - A[s, i]*A[:, m+1] Asj = A[s, j]", "+ b*p == Sum(ci*qi, (i, 1, m)). Because M has", "of hyperexponential monomials of K over C(x)). If K is", "a element of K if and only if there are", "DE.case == 'exp': # this re-checking can be avoided with", "an element of k(t). a in k(t) is the derivative", "not A: return A, u Au = A.row_join(u) Au =", "n1, ..., ns. return [hk.cancel(gamma, include=True) for hk in h],", "more efficiently. # It isn't too worrisome, because the heuristic", "== 0, # in which case the quotient is Sum(fi*qqi).", "indices to D (or T). E_args are the arguments of", "Add(*[Mul(i, j) for i, j in ans]) argterms = ([DE.T[i]", "cm in Const(k) and q in k[t] of degree at", "ba, bd = frac_in(b, t0, field=True) Q0 = [frac_in(qi.TC(), t0,", "ls = splitfactor(l, DE) z = ls*ln.gcd(ln.diff(DE.t)) if not z.has(DE.t):", "gamma = q G = [(q*ga).cancel(gd, include=True) for ga, gd", "for i, j in ans]) == u. This is useful", "the weakly normalized equation. gamma *= hn A, B, G,", "# the currently added test case takes large time #", "or b.degree() < DE.d.degree() - 1) and (DE.case == 'base'", "1 and any d1 in Const(k) = k. f =", "\"'base'}, not %s.\" % case) nb = order_at(ba, p, DE.t)", "in Const(k). # Sum(aji*qi) is divisible by d with exact", "prde_spde, it will always # terminate no matter what n", "a, qi) for qi in Q])) A = a B", "0. If this heuristic fails, the structure theorem approach will", "combined relation matrix. A = -eye(m) for vj in V:", "respectively. For the hyperexponential (resp. hypertangent) case, given a derivation", "d in k[t]. \"\"\" m = len(G) q, (fa, fd)", "in Const(k)^(m + v) are column # vectors generating the", "reduce(lambda i, j: i.lcm(j), (dn,) + En) # lcm(dn, en1,", "implies that Dt == a*t + b with for some", "alphad, etaa, etad, DE) if A is not None: Q,", "right hand side of the equation (i.e., gi in k(t)),", "then T[i] == exp(E_args[i])). This is needed to compute the", "zip(R, Z)] R = list(R) n1 = n - a.degree(DE.t)", "# problem entirely by being careful with the sorts of", "Risch Differential Equation - No cancellation: deg(b) large enough. Given", "a*Dq + b*q == Sum(ci*gi, (i, 1, m)), p =", "+ b*q == Sum(ci*gi, (i, 1, m)), r == q*h", "variable (the structure theorems should be able to completely decide", "the solutions of Bx == v, or v has a", "= eye(m) A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c)) return (H, A) #", "over k, then deg(p) <= N. So that the special", "not s or not s[c1].is_Rational: # deg(q) <= B, no", "G, hs = prde_special_denom(a, ba, bd, G, DE) # Solutions", "G] En, Es = list(zip(*E)) c = reduce(lambda i, j:", "DE.t), min([order_at_oo(ga, gd, DE.t) for ga, gd in G])) #", "S1irr == Sirr, then p is in k[t], and if", "solutions y = z/q of the original equation. 
gamma =", "if this # changes, then this will need to be", "where qi in k[t] is the polynomial component # of", "= derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True) with DecrementLevel(DE): pa, pd =", "for ga, gd in G] if not all([ri.is_zero for _,", "degree of terms wrt mod 4. Returns a tuple (ba[0],", "len(h))) A = A.col_join(zeros(B.rows, m).row_join(B)) ## Eliminate d1, ..., du.", "all the solutions of Bx == v, or v has", "'base' or DE.d.degree() >= 2)): return prde_no_cancel_b_small(b, q, n, DE)", "for v in V if v[0] != 0] if not", "n = prde_spde(a, b, q, n, DE) beta = [betai", "where k' = k + m + u. v =", "+ Fi taking its place Q = Q + Fi", "c1*q.nth(i) for i in range(B + 1, C + 1)]", "t, field=True) for fa, fd in f] else: # Base", "Df cannot be written as the logarithmic derivative of a", "(i.e., if i is in E_K, then T[i] == exp(E_args[i])).", "N = [n1, ..., ns] where the ni in Const(k)^(m", "\"theorems with hypertangent support is not yet implemented.\") # TODO:", "DE) elif (DE.d.degree() >= 2 and b.degree() == DE.d.degree() -", "# qq = [qq1, ..., qqm] where qqi = qi.quo(d).", "original equation are # y = Sum(dj*fj, (j, 1, r)", "a non-constant coefficient, in which case s is False Ax", "the exponential terms in E_args. To handle the case where", "Equations. The methods used for solving Parametric Risch Differential Equations", "D on k[t], n in ZZ, and b, q1, ...,", "qi.quo(d). # M is a matrix with m columns an", "N.as_poly(DE.t)*fa*wd - M.as_poly(DE.t)*wa*fd nfmwd = fd*wd Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd,", "B = max(0, derivation(DE.t, DE).degree(DE.t) - 1) C = max(p.degree(DE.t),", "0, and gcd(a, t) == 1 (resp. gcd(a, t**2 +", "of the previous equation. gamma *= hs g = A.gcd(B)", "a solution of Ax == 0. \"\"\" m = len(q)", "list of tuples such that Mul(*[i**j for i, j in", "= [(i, j*common_denom) for i, j in residueterms] m =", "coefficient, in which case s is False Ax == u", "in Const(k). # Sum(ci*qqi) is Sum(ci*qi).quo(d), and the remainder is", ">= fd.degree(): # f is the logarithmic derivative in the", "if p.degree(DE.t) > B: return None c = lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC())", "if DE.case == 'exp': # this re-checking can be avoided", "bd.items()] denom_imag = [value if key[0] % 4 == 1", "n = max([ri.degree() for ri in r]) M = Matrix(n", "n > -b.as_poly().LC()/DE.d.as_poly().LC()): raise NotImplementedError(\"prde_no_cancel_b_equal() is \" \"not yet implemented.\")", "V = [(-a*hn*ga).cancel(gd, include=True) for ga, gd in G] return", "Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj) # where rj", "{'exp', 'tan', 'primitive'} for the hyperexponential, hypertangent, and primitive cases,", "as a # parametric Risch DE problem Fa = Poly(0,", "# f must be simple n, s = splitfactor(fd, DE)", "- r - 1 C = list(v[1: m + 1])", "of all indices of logarithmic monomials of K over C(x)),", "The methods used for solving Parametric Risch Differential Equations parallel", "if case == 'auto': case = DE.case if case ==", "DecrementLevel(DE): t0 = DE.t # k = k0(t0) ba, bd", "[i.as_numer_denom()[1] for i in [j for _, j in residueterms]]", "qm] in k[t]^m and a matrix M with entries in", "but this has # the minimum number of rows. Mqq", "which elements of k(t) produce u. 
This function uses the", "m, lambda i, j: q[j].nth(i)) A, _ = constant_system(M, zeros(M.rows,", "\" \"coefficients in this case.\") else: n = reduce(ilcm, [i.as_numer_denom()[1]", "is a polynomial if and only if M*Matrix([f1, ..., fm])", "wd), DE.t) A = parametric_log_deriv(pa, pd, wa, wd, DE) if", "j], DE, basic=True)) Rm1 = Rm1.applyfunc(cancel) um1 = cancel(derivation(u[i], DE,", "k(t)^m, and for any solution c1, ..., cm in Const(k)", "k(t) with f weakly normalized with respect to t, return", "= A.nullspace() V = [v for v in V if", "# Sum(ci*qqi) is Sum(ci*qi).quo(d), and the remainder is zero #", "determine that S1irr == Sirr. Furthermore, it will automatically call", "solution', until the structure # theorem version of parametric_log_deriv is", "function name is used to indicate that. f in k(t)", "f*y == Sum(ci*Gi, (i, 1, m)). Given a derivation D", "to solutions z = q/hn of the weakly normalized equation.", "None else: if not all(i.is_Rational for i in u): #", "work with non-rational \" \"coefficients in this case.\") else: n", "any f in K, Df is the logarithmic derivative of", "the right hand side of the equation (i.e., qi in", "for the limited integration problem. Given a derivation D on", "functions (see # Bronstein's book, page 255), so most likely", "== 1, and G = [g1, ..., gm] in k(t)^m,", "when t_i is in E_K/C(x) and deg(Dt_i) == 0 when", "DE) if not s.is_one: pass z = z or Dummy('z')", "except possibly when Dy + f*y == 0 # is", "sympy.integrals.rde import (order_at, order_at_oo, weak_normalizer, bound_degree) from sympy.integrals.risch import (gcdex_diophantine,", "if it cannot determine that S1irr == Sirr. Furthermore, it", "--- = --. --- --- t f i in L", "evaluated at sqrt(-1) without actually evaluating it at sqrt(-1) Separates", "cm, d1, ..., dr] v = V[0]/(-c0) r = len(h)", "and hypertangent \" \"cases have not yet been implemented\") #", "Sum(fi*gi, (i, 1, m)), where f1, ..., fm are elements", "'auto': case = DE.case if case == 'exp': p =", "fields that are computable # via the cancel() function, in", "1) # f had better be 0 in that case.", "j/n) for i, j in zip(argterms, u)])) return (ans, result,", "Const(k) and (c1, ..., cm, d1, ..., dr) is a", "will attempt to determine the type of the derivation automatically.", "i in u) or not A: # If the elements", "None: return None n, e, u = A u *=", "in Const(k) # if and only y0 = Sum(dj*fj, (j,", "n*a*Dp/p)*p**N, g1*p**(N - n), ..., gm*p**(N - n), p**-n) return", "Sum(dj*aji) # (i = 1, ..., m) are then y" ]
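These routines are internal: they operate on DifferentialExtension objects and are normally reached through SymPy's public wrapper risch_integrate rather than called directly. A minimal sketch of exercising the machinery through that wrapper (risch_integrate is SymPy's documented entry point; the integrands are chosen only for illustration):

    from sympy import exp, log, symbols
    from sympy.integrals.risch import risch_integrate

    x = symbols('x')
    # Elementary case: the algorithm finds the antiderivative.
    print(risch_integrate(log(x), x))     # x*log(x) - x
    # Non-elementary case: a NonElementaryIntegral is returned, which
    # constitutes a proof that no elementary antiderivative exists.
    print(risch_integrate(exp(x**2), x))  # NonElementaryIntegral(exp(x**2), x)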
[ "[wait_for] if type(confirmation) == str: confirmation = [confirmation] with netmiko.Netmiko(**device)", "with netmiko.Netmiko(**device) as ssh: ssh.enable() result = ssh.send_command_timing( command, strip_prompt=False,", "= devices[0] out = send_cmd_with_prompt( r1, \"copy run start\", wait_for=\"Destination", "print(out) \"\"\" R1#copy run start Destination filename [startup-config]? Building configuration...", "str: confirmation = [confirmation] with netmiko.Netmiko(**device) as ssh: ssh.enable() result", "R1#copy run start Destination filename [startup-config]? Building configuration... [OK] R1#", "wait in result: result += ssh.send_command_timing( confirm, strip_prompt=False, strip_command=False )", "[confirmation] with netmiko.Netmiko(**device) as ssh: ssh.enable() result = ssh.send_command_timing( command,", "= yaml.safe_load(f) r1 = devices[0] out = send_cmd_with_prompt( r1, \"copy", "== str: confirmation = [confirmation] with netmiko.Netmiko(**device) as ssh: ssh.enable()", "command, strip_prompt=False, strip_command=False ) for wait, confirm in zip(wait_for, confirmation):", ") print(out) \"\"\" R1#copy run start Destination filename [startup-config]? Building", "r1 = devices[0] out = send_cmd_with_prompt( r1, \"copy run start\",", "yaml import netmiko import paramiko def send_cmd_with_prompt(device, command, *, wait_for,", "type(confirmation) == str: confirmation = [confirmation] with netmiko.Netmiko(**device) as ssh:", "ssh.send_command_timing( command, strip_prompt=False, strip_command=False ) for wait, confirm in zip(wait_for,", "as f: devices = yaml.safe_load(f) r1 = devices[0] out =", "import netmiko import paramiko def send_cmd_with_prompt(device, command, *, wait_for, confirmation):", "command, *, wait_for, confirmation): if type(wait_for) == str: wait_for =", "pprint import pprint import yaml import netmiko import paramiko def", "+= ssh.send_command_timing( confirm, strip_prompt=False, strip_command=False ) return result if __name__", "send_cmd_with_prompt( r1, \"copy run start\", wait_for=\"Destination filename\", confirmation=\"\\n\" ) print(out)", "= send_cmd_with_prompt( r1, \"copy run start\", wait_for=\"Destination filename\", confirmation=\"\\n\" )", "= [confirmation] with netmiko.Netmiko(**device) as ssh: ssh.enable() result = ssh.send_command_timing(", "\"\"\" R1#copy run start Destination filename [startup-config]? Building configuration... [OK]", "in zip(wait_for, confirmation): if wait in result: result += ssh.send_command_timing(", "ssh: ssh.enable() result = ssh.send_command_timing( command, strip_prompt=False, strip_command=False ) for", "from pprint import pprint import yaml import netmiko import paramiko", "= [wait_for] if type(confirmation) == str: confirmation = [confirmation] with", "result += ssh.send_command_timing( confirm, strip_prompt=False, strip_command=False ) return result if", "for wait, confirm in zip(wait_for, confirmation): if wait in result:", "def send_cmd_with_prompt(device, command, *, wait_for, confirmation): if type(wait_for) == str:", "confirm in zip(wait_for, confirmation): if wait in result: result +=", "run start Destination filename [startup-config]? Building configuration... 
[OK] R1# \"\"\"", "== \"__main__\": with open(\"devices.yaml\") as f: devices = yaml.safe_load(f) r1", "confirmation): if wait in result: result += ssh.send_command_timing( confirm, strip_prompt=False,", "strip_prompt=False, strip_command=False ) return result if __name__ == \"__main__\": with", "with open(\"devices.yaml\") as f: devices = yaml.safe_load(f) r1 = devices[0]", "wait_for, confirmation): if type(wait_for) == str: wait_for = [wait_for] if", "start\", wait_for=\"Destination filename\", confirmation=\"\\n\" ) print(out) \"\"\" R1#copy run start", ") for wait, confirm in zip(wait_for, confirmation): if wait in", "import pprint import yaml import netmiko import paramiko def send_cmd_with_prompt(device,", "\"__main__\": with open(\"devices.yaml\") as f: devices = yaml.safe_load(f) r1 =", "wait, confirm in zip(wait_for, confirmation): if wait in result: result", "= ssh.send_command_timing( command, strip_prompt=False, strip_command=False ) for wait, confirm in", "run start\", wait_for=\"Destination filename\", confirmation=\"\\n\" ) print(out) \"\"\" R1#copy run", "r1, \"copy run start\", wait_for=\"Destination filename\", confirmation=\"\\n\" ) print(out) \"\"\"", "if __name__ == \"__main__\": with open(\"devices.yaml\") as f: devices =", "strip_command=False ) for wait, confirm in zip(wait_for, confirmation): if wait", "zip(wait_for, confirmation): if wait in result: result += ssh.send_command_timing( confirm,", ") return result if __name__ == \"__main__\": with open(\"devices.yaml\") as", "if wait in result: result += ssh.send_command_timing( confirm, strip_prompt=False, strip_command=False", "result: result += ssh.send_command_timing( confirm, strip_prompt=False, strip_command=False ) return result", "open(\"devices.yaml\") as f: devices = yaml.safe_load(f) r1 = devices[0] out", "result if __name__ == \"__main__\": with open(\"devices.yaml\") as f: devices", "confirm, strip_prompt=False, strip_command=False ) return result if __name__ == \"__main__\":", "import paramiko def send_cmd_with_prompt(device, command, *, wait_for, confirmation): if type(wait_for)", "if type(confirmation) == str: confirmation = [confirmation] with netmiko.Netmiko(**device) as", "ssh.send_command_timing( confirm, strip_prompt=False, strip_command=False ) return result if __name__ ==", "pprint import yaml import netmiko import paramiko def send_cmd_with_prompt(device, command,", "netmiko import paramiko def send_cmd_with_prompt(device, command, *, wait_for, confirmation): if", "str: wait_for = [wait_for] if type(confirmation) == str: confirmation =", "confirmation = [confirmation] with netmiko.Netmiko(**device) as ssh: ssh.enable() result =", "out = send_cmd_with_prompt( r1, \"copy run start\", wait_for=\"Destination filename\", confirmation=\"\\n\"", "f: devices = yaml.safe_load(f) r1 = devices[0] out = send_cmd_with_prompt(", "type(wait_for) == str: wait_for = [wait_for] if type(confirmation) == str:", "\"copy run start\", wait_for=\"Destination filename\", confirmation=\"\\n\" ) print(out) \"\"\" R1#copy", "wait_for=\"Destination filename\", confirmation=\"\\n\" ) print(out) \"\"\" R1#copy run start Destination", "strip_prompt=False, strip_command=False ) for wait, confirm in zip(wait_for, confirmation): if", "in result: result += ssh.send_command_timing( confirm, strip_prompt=False, strip_command=False ) return", "confirmation=\"\\n\" ) print(out) \"\"\" R1#copy run start Destination filename [startup-config]?", "== str: wait_for = [wait_for] if type(confirmation) == str: confirmation", 
"strip_command=False ) return result if __name__ == \"__main__\": with open(\"devices.yaml\")", "yaml.safe_load(f) r1 = devices[0] out = send_cmd_with_prompt( r1, \"copy run", "*, wait_for, confirmation): if type(wait_for) == str: wait_for = [wait_for]", "__name__ == \"__main__\": with open(\"devices.yaml\") as f: devices = yaml.safe_load(f)", "filename\", confirmation=\"\\n\" ) print(out) \"\"\" R1#copy run start Destination filename", "if type(wait_for) == str: wait_for = [wait_for] if type(confirmation) ==", "confirmation): if type(wait_for) == str: wait_for = [wait_for] if type(confirmation)", "ssh.enable() result = ssh.send_command_timing( command, strip_prompt=False, strip_command=False ) for wait,", "devices[0] out = send_cmd_with_prompt( r1, \"copy run start\", wait_for=\"Destination filename\",", "devices = yaml.safe_load(f) r1 = devices[0] out = send_cmd_with_prompt( r1,", "result = ssh.send_command_timing( command, strip_prompt=False, strip_command=False ) for wait, confirm", "as ssh: ssh.enable() result = ssh.send_command_timing( command, strip_prompt=False, strip_command=False )", "wait_for = [wait_for] if type(confirmation) == str: confirmation = [confirmation]", "netmiko.Netmiko(**device) as ssh: ssh.enable() result = ssh.send_command_timing( command, strip_prompt=False, strip_command=False", "send_cmd_with_prompt(device, command, *, wait_for, confirmation): if type(wait_for) == str: wait_for", "return result if __name__ == \"__main__\": with open(\"devices.yaml\") as f:", "import yaml import netmiko import paramiko def send_cmd_with_prompt(device, command, *,", "paramiko def send_cmd_with_prompt(device, command, *, wait_for, confirmation): if type(wait_for) ==" ]
[ "nested Dictionary into an object with key-values accessibly using attribute", "\"\"\" A class to convert a nested Dictionary into an", "nested dicts (like: AttributeDict.attr.attr) \"\"\" def __init__(self, **entries): self.add_entries(**entries) def", "an object with key-values accessibly using attribute notation (AttributeDict.attribute) instead", "notation (AttributeDict.attribute) instead of key notation (Dict[\"key\"]). This class recursively", "allowing you to recurse down nested dicts (like: AttributeDict.attr.attr) \"\"\"", "in entries.items(): if type(value) is dict: self.__dict__[key] = AttributeDict(**value) else:", "= AttributeDict(**value) else: self.__dict__[key] = value def getAttributes(self): \"\"\" Return", "**entries): for key, value in entries.items(): if type(value) is dict:", "AttributeDict(object): \"\"\" A class to convert a nested Dictionary into", "sets Dicts to objects, allowing you to recurse down nested", "with key-values accessibly using attribute notation (AttributeDict.attribute) instead of key", "add_entries(self, **entries): for key, value in entries.items(): if type(value) is", "instead of key notation (Dict[\"key\"]). This class recursively sets Dicts", "**entries): self.add_entries(**entries) def add_entries(self, **entries): for key, value in entries.items():", "self.__dict__[key] = AttributeDict(**value) else: self.__dict__[key] = value def getAttributes(self): \"\"\"", "if type(value) is dict: self.__dict__[key] = AttributeDict(**value) else: self.__dict__[key] =", "is dict: self.__dict__[key] = AttributeDict(**value) else: self.__dict__[key] = value def", "dict: self.__dict__[key] = AttributeDict(**value) else: self.__dict__[key] = value def getAttributes(self):", "__init__(self, **entries): self.add_entries(**entries) def add_entries(self, **entries): for key, value in", "object with key-values accessibly using attribute notation (AttributeDict.attribute) instead of", "AttributeDict(**value) else: self.__dict__[key] = value def getAttributes(self): \"\"\" Return all", "def getAttributes(self): \"\"\" Return all the attributes of the object", "for key, value in entries.items(): if type(value) is dict: self.__dict__[key]", "entries.items(): if type(value) is dict: self.__dict__[key] = AttributeDict(**value) else: self.__dict__[key]", "getAttributes(self): \"\"\" Return all the attributes of the object \"\"\"", "A class to convert a nested Dictionary into an object", "notation (Dict[\"key\"]). 
This class recursively sets Dicts to objects, allowing", "value def getAttributes(self): \"\"\" Return all the attributes of the", "key, value in entries.items(): if type(value) is dict: self.__dict__[key] =", "a nested Dictionary into an object with key-values accessibly using", "Dicts to objects, allowing you to recurse down nested dicts", "to recurse down nested dicts (like: AttributeDict.attr.attr) \"\"\" def __init__(self,", "type(value) is dict: self.__dict__[key] = AttributeDict(**value) else: self.__dict__[key] = value", "class to convert a nested Dictionary into an object with", "key-values accessibly using attribute notation (AttributeDict.attribute) instead of key notation", "objects, allowing you to recurse down nested dicts (like: AttributeDict.attr.attr)", "value in entries.items(): if type(value) is dict: self.__dict__[key] = AttributeDict(**value)", "to objects, allowing you to recurse down nested dicts (like:", "else: self.__dict__[key] = value def getAttributes(self): \"\"\" Return all the", "AttributeDict.attr.attr) \"\"\" def __init__(self, **entries): self.add_entries(**entries) def add_entries(self, **entries): for", "\"\"\" Return all the attributes of the object \"\"\" return", "down nested dicts (like: AttributeDict.attr.attr) \"\"\" def __init__(self, **entries): self.add_entries(**entries)", "(Dict[\"key\"]). This class recursively sets Dicts to objects, allowing you", "to convert a nested Dictionary into an object with key-values", "dicts (like: AttributeDict.attr.attr) \"\"\" def __init__(self, **entries): self.add_entries(**entries) def add_entries(self,", "key notation (Dict[\"key\"]). This class recursively sets Dicts to objects,", "using attribute notation (AttributeDict.attribute) instead of key notation (Dict[\"key\"]). This", "<filename>mppi/Utilities/AttributeDict.py<gh_stars>1-10 class AttributeDict(object): \"\"\" A class to convert a nested", "This class recursively sets Dicts to objects, allowing you to", "\"\"\" def __init__(self, **entries): self.add_entries(**entries) def add_entries(self, **entries): for key,", "of key notation (Dict[\"key\"]). This class recursively sets Dicts to", "accessibly using attribute notation (AttributeDict.attribute) instead of key notation (Dict[\"key\"]).", "self.add_entries(**entries) def add_entries(self, **entries): for key, value in entries.items(): if", "you to recurse down nested dicts (like: AttributeDict.attr.attr) \"\"\" def", "convert a nested Dictionary into an object with key-values accessibly", "Dictionary into an object with key-values accessibly using attribute notation", "class recursively sets Dicts to objects, allowing you to recurse", "attribute notation (AttributeDict.attribute) instead of key notation (Dict[\"key\"]). This class", "recurse down nested dicts (like: AttributeDict.attr.attr) \"\"\" def __init__(self, **entries):", "(like: AttributeDict.attr.attr) \"\"\" def __init__(self, **entries): self.add_entries(**entries) def add_entries(self, **entries):", "def __init__(self, **entries): self.add_entries(**entries) def add_entries(self, **entries): for key, value", "class AttributeDict(object): \"\"\" A class to convert a nested Dictionary", "self.__dict__[key] = value def getAttributes(self): \"\"\" Return all the attributes", "Return all the attributes of the object \"\"\" return self.__dict__.keys()", "into an object with key-values accessibly using attribute notation (AttributeDict.attribute)", "(AttributeDict.attribute) instead of key notation (Dict[\"key\"]). 
This class recursively sets", "recursively sets Dicts to objects, allowing you to recurse down", "def add_entries(self, **entries): for key, value in entries.items(): if type(value)", "= value def getAttributes(self): \"\"\" Return all the attributes of" ]
[ "this: the frequency of the toggling, and the duty cycle.", "red_brightness = 100 green_brightness = 100 blue_brightness = 100 white_brightness", "Written by @MarioTheMaker from sys import stdin, stdout, exit import", "the brightness for each color red_brightness = 100 green_brightness =", "that controls # the LED lights and fan in a", "be how long the pin is high compared with the", "associated with the given id. # If additional arguments are", "PWM output, # as an unsigned 16-bit value in the", "channel g=machine.PWM(machine.Pin(2)); g.freq(20000) # Green channel b=machine.PWM(machine.Pin(1)); b.freq(20000) # Blue", "r.duty_u16(int(red_brightness)*n) g.duty_u16(int(green_brightness)*n) b.duty_u16(int(blue_brightness)*n) w.duty_u16(int(white_brightness)*n) time.sleep(.3) n = n - 5", "Pico microcontroller running # Micropython. # lesson Written by @MarioTheMaker", "high compared with the length of a # single period", "18, 2021 # The Growing Beyond Earth (GBE) control box", "time. # https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#: # control I/O pins # machine.Pin(id, mode=-", "It achieves this by rapidly toggling the pin from low", "I/O pins # machine.Pin(id, mode=- 1, pull=- 1, *, value,", "BOTANIC GARDEN, Oct 18, 2021 # The Growing Beyond Earth", "RASPBERRY PI PICO / MICROPYTHON # FAIRCHILD TROPICAL BOTANIC GARDEN,", "import time #Set the brightness for each color red_brightness =", "w=machine.PWM(machine.Pin(3)); w.freq(20000) # White channel # More info https://docs.micropython.org/en/latest/library/machine.PWM.html #", "channel b=machine.PWM(machine.Pin(1)); b.freq(20000) # Blue channel w=machine.PWM(machine.Pin(3)); w.freq(20000) # White", "GROWNG BEYOND EARTH CONTROL BOX Traning # RASPBERRY PI PICO", "pin is high all of the # time, and minimum", "the current duty cycle of the PWM output, # as", "print(\"Power Level \",n) r.duty_u16(int(red_brightness)*n) g.duty_u16(int(green_brightness)*n) b.duty_u16(int(blue_brightness)*n) w.duty_u16(int(white_brightness)*n) time.sleep(.3) n =", "# Pulse width modulation (PWM) is a way to get", "for each color red_brightness = 100 green_brightness = 100 blue_brightness", "a device that controls # the LED lights and fan", "(GBE) control box is a device that controls # the", "PWM.duty_u16([value]) Get the current duty cycle of the PWM output,", "a way to get an artificial analog output on a", "way to get an artificial analog output on a digital", "parameters # associated with this: the frequency of the toggling,", "GBE growth chamber. It can also control # accessories including", "the duty cycle. # The duty cycle is defined to", "are used to initialise # the pin. Any settings that", "the range 0 to 65535 inclusive. n = 100 while", "100 while n > 0: print(\"Power Level \",n) r.duty_u16(int(red_brightness)*n) g.duty_u16(int(green_brightness)*n)", "box is a device that controls # the LED lights", "accessories including a 12v water pump and environmental sensors. #", "is a device that controls # the LED lights and", "Micropython. # lesson Written by @MarioTheMaker from sys import stdin,", "cycle of the PWM output, # as an unsigned 16-bit", "pin) associated with the given id. 
# If additional arguments", "sys import stdin, stdout, exit import machine import time #Set", "GARDEN, Oct 18, 2021 # The Growing Beyond Earth (GBE)", "with the length of a # single period (low plus", "to be how long the pin is high compared with", "= 100 white_brightness = 100 # Pulse width modulation (PWM)", "g.freq(20000) # Green channel b=machine.PWM(machine.Pin(1)); b.freq(20000) # Blue channel w=machine.PWM(machine.Pin(3));", "It can also control # accessories including a 12v water", "b.freq(20000) # Blue channel w=machine.PWM(machine.Pin(3)); w.freq(20000) # White channel #", "in the constructor then they are used to initialise #", "is defined to be how long the pin is high", "is high compared with the length of a # single", "to initialise # the pin. Any settings that are not", "Level \",n) r.duty_u16(int(red_brightness)*n) g.duty_u16(int(green_brightness)*n) b.duty_u16(int(blue_brightness)*n) w.duty_u16(int(white_brightness)*n) time.sleep(.3) n = n", "of a # single period (low plus high time). Maximum", "with the given id. # If additional arguments are given", "https://docs.micropython.org/en/latest/library/machine.Pin.html r=machine.PWM(machine.Pin(0)); r.freq(20000) # Red channel g=machine.PWM(machine.Pin(2)); g.freq(20000) # Green", "= 100 green_brightness = 100 blue_brightness = 100 white_brightness =", "when the pin is high all of the # time,", "the pin is high compared with the length of a", "single period (low plus high time). Maximum duty cycle is", "r.freq(20000) # Red channel g=machine.PWM(machine.Pin(2)); g.freq(20000) # Green channel b=machine.PWM(machine.Pin(1));", "# machine.Pin(id, mode=- 1, pull=- 1, *, value, drive, alt)", "channel w=machine.PWM(machine.Pin(3)); w.freq(20000) # White channel # More info https://docs.micropython.org/en/latest/library/machine.PWM.html", "long the pin is high compared with the length of", "12v water pump and environmental sensors. # The device is", "used to initialise # the pin. Any settings that are", "width modulation (PWM) is a way to get an artificial", "in their previous state. # More info https://docs.micropython.org/en/latest/library/machine.Pin.html r=machine.PWM(machine.Pin(0)); r.freq(20000)", "from sys import stdin, stdout, exit import machine import time", "time). Maximum duty cycle is when the pin is high", "g=machine.PWM(machine.Pin(2)); g.freq(20000) # Green channel b=machine.PWM(machine.Pin(1)); b.freq(20000) # Blue channel", "a # single period (low plus high time). Maximum duty", "digital pin. # It achieves this by rapidly toggling the", "achieves this by rapidly toggling the pin from low to", "to 65535 inclusive. n = 100 while n > 0:", "pins # machine.Pin(id, mode=- 1, pull=- 1, *, value, drive,", "of the # time, and minimum is when it is", "info https://docs.micropython.org/en/latest/library/machine.Pin.html r=machine.PWM(machine.Pin(0)); r.freq(20000) # Red channel g=machine.PWM(machine.Pin(2)); g.freq(20000) #", "PICO / MICROPYTHON # FAIRCHILD TROPICAL BOTANIC GARDEN, Oct 18,", "Get the current duty cycle of the PWM output, #", "high. There are two parameters # associated with this: the", "Pi Pico microcontroller running # Micropython. 
# lesson Written by", "White channel # More info https://docs.micropython.org/en/latest/library/machine.PWM.html # Start a loop", "# The duty cycle is defined to be how long", "#Turn all the lights off time.sleep(3) r.duty_u16(0) g.duty_u16(0) b.duty_u16(0) w.duty_u16(0)", "machine import time #Set the brightness for each color red_brightness", "how long the pin is high compared with the length", "time, and minimum is when it is low all of", "Blue channel w=machine.PWM(machine.Pin(3)); w.freq(20000) # White channel # More info", "100 # Pulse width modulation (PWM) is a way to", "associated with this: the frequency of the toggling, and the", "# If additional arguments are given in the constructor then", "# FAIRCHILD TROPICAL BOTANIC GARDEN, Oct 18, 2021 # The", "initialise # the pin. Any settings that are not specified", "TROPICAL BOTANIC GARDEN, Oct 18, 2021 # The Growing Beyond", "Maximum duty cycle is when the pin is high all", "16-bit value in the range 0 to 65535 inclusive. n", "of the time. # https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#: # control I/O pins #", "by rapidly toggling the pin from low to high. There", "additional arguments are given in the constructor then they are", "device that controls # the LED lights and fan in", "duty cycle. # The duty cycle is defined to be", "# The Growing Beyond Earth (GBE) control box is a", "g.duty_u16(int(green_brightness)*n) b.duty_u16(int(blue_brightness)*n) w.duty_u16(int(white_brightness)*n) time.sleep(.3) n = n - 5 #Turn", "given id. # If additional arguments are given in the", "r=machine.PWM(machine.Pin(0)); r.freq(20000) # Red channel g=machine.PWM(machine.Pin(2)); g.freq(20000) # Green channel", "(low plus high time). Maximum duty cycle is when the", "toggling the pin from low to high. There are two", "an artificial analog output on a digital pin. # It", "in the range 0 to 65535 inclusive. n = 100", "artificial analog output on a digital pin. # It achieves", "Traning # RASPBERRY PI PICO / MICROPYTHON # FAIRCHILD TROPICAL", "each color red_brightness = 100 green_brightness = 100 blue_brightness =", "and fan in a GBE growth chamber. It can also", "# More info https://docs.micropython.org/en/latest/library/machine.PWM.html # Start a loop and change", "are not specified will remain in their previous state. #", "# control I/O pins # machine.Pin(id, mode=- 1, pull=- 1,", "Earth (GBE) control box is a device that controls #", "control # accessories including a 12v water pump and environmental", "- 5 #Turn all the lights off time.sleep(3) r.duty_u16(0) g.duty_u16(0)", "cycle is when the pin is high all of the", "n - 5 #Turn all the lights off time.sleep(3) r.duty_u16(0)", "while n > 0: print(\"Power Level \",n) r.duty_u16(int(red_brightness)*n) g.duty_u16(int(green_brightness)*n) b.duty_u16(int(blue_brightness)*n)", "all of the time. # https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#: # control I/O pins", "Start a loop and change the brightness multiplier \"n\" #", "control I/O pins # machine.Pin(id, mode=- 1, pull=- 1, *,", "arguments are given in the constructor then they are used", "pin peripheral (GPIO pin) associated with the given id. 
#", "b.duty_u16(int(blue_brightness)*n) w.duty_u16(int(white_brightness)*n) time.sleep(.3) n = n - 5 #Turn all", "lesson Written by @MarioTheMaker from sys import stdin, stdout, exit", "# White channel # More info https://docs.micropython.org/en/latest/library/machine.PWM.html # Start a", "two parameters # associated with this: the frequency of the", "> 0: print(\"Power Level \",n) r.duty_u16(int(red_brightness)*n) g.duty_u16(int(green_brightness)*n) b.duty_u16(int(blue_brightness)*n) w.duty_u16(int(white_brightness)*n) time.sleep(.3)", "brightness for each color red_brightness = 100 green_brightness = 100", "machine.Pin(id, mode=- 1, pull=- 1, *, value, drive, alt) #", "not specified will remain in their previous state. # More", "the toggling, and the duty cycle. # The duty cycle", "The Growing Beyond Earth (GBE) control box is a device", "import stdin, stdout, exit import machine import time #Set the", "# Red channel g=machine.PWM(machine.Pin(2)); g.freq(20000) # Green channel b=machine.PWM(machine.Pin(1)); b.freq(20000)", "mode=- 1, pull=- 1, *, value, drive, alt) # Access", "the pin from low to high. There are two parameters", "given in the constructor then they are used to initialise", "is based on a Raspberry Pi Pico microcontroller running #", "and change the brightness multiplier \"n\" # PWM.duty_u16([value]) Get the", "toggling, and the duty cycle. # The duty cycle is", "# More info https://docs.micropython.org/en/latest/library/machine.Pin.html r=machine.PWM(machine.Pin(0)); r.freq(20000) # Red channel g=machine.PWM(machine.Pin(2));", "white_brightness = 100 # Pulse width modulation (PWM) is a", "growth chamber. It can also control # accessories including a", "the pin is high all of the # time, and", "plus high time). Maximum duty cycle is when the pin", "loop and change the brightness multiplier \"n\" # PWM.duty_u16([value]) Get", "get an artificial analog output on a digital pin. #", "More info https://docs.micropython.org/en/latest/library/machine.PWM.html # Start a loop and change the", "inclusive. n = 100 while n > 0: print(\"Power Level", "that are not specified will remain in their previous state.", "2021 # The Growing Beyond Earth (GBE) control box is", "state. # More info https://docs.micropython.org/en/latest/library/machine.Pin.html r=machine.PWM(machine.Pin(0)); r.freq(20000) # Red channel", "peripheral (GPIO pin) associated with the given id. # If", "id. # If additional arguments are given in the constructor", "EARTH CONTROL BOX Traning # RASPBERRY PI PICO / MICROPYTHON", "# lesson Written by @MarioTheMaker from sys import stdin, stdout,", "@MarioTheMaker from sys import stdin, stdout, exit import machine import", "all of the # time, and minimum is when it", "unsigned 16-bit value in the range 0 to 65535 inclusive.", "1, pull=- 1, *, value, drive, alt) # Access the", "# the LED lights and fan in a GBE growth", "a Raspberry Pi Pico microcontroller running # Micropython. # lesson", "duty cycle is defined to be how long the pin", "is a way to get an artificial analog output on", "sensors. # The device is based on a Raspberry Pi", "defined to be how long the pin is high compared", "analog output on a digital pin. # It achieves this", "high time). Maximum duty cycle is when the pin is", "The device is based on a Raspberry Pi Pico microcontroller", "channel # More info https://docs.micropython.org/en/latest/library/machine.PWM.html # Start a loop and", "on a digital pin. # It achieves this by rapidly", "they are used to initialise # the pin. 
Any settings", "pull=- 1, *, value, drive, alt) # Access the pin", "import machine import time #Set the brightness for each color", "Access the pin peripheral (GPIO pin) associated with the given", "65535 inclusive. n = 100 while n > 0: print(\"Power", "The duty cycle is defined to be how long the", "length of a # single period (low plus high time).", "= 100 while n > 0: print(\"Power Level \",n) r.duty_u16(int(red_brightness)*n)", "time #Set the brightness for each color red_brightness = 100", "to get an artificial analog output on a digital pin.", "color red_brightness = 100 green_brightness = 100 blue_brightness = 100", "change the brightness multiplier \"n\" # PWM.duty_u16([value]) Get the current", "# Access the pin peripheral (GPIO pin) associated with the", "as an unsigned 16-bit value in the range 0 to", "value, drive, alt) # Access the pin peripheral (GPIO pin)", "multiplier \"n\" # PWM.duty_u16([value]) Get the current duty cycle of", "BEYOND EARTH CONTROL BOX Traning # RASPBERRY PI PICO /", "settings that are not specified will remain in their previous", "https://docs.micropython.org/en/latest/library/machine.PWM.html # Start a loop and change the brightness multiplier", "minimum is when it is low all of the time.", "modulation (PWM) is a way to get an artificial analog", "# GROWNG BEYOND EARTH CONTROL BOX Traning # RASPBERRY PI", "then they are used to initialise # the pin. Any", "water pump and environmental sensors. # The device is based", "LED lights and fan in a GBE growth chamber. It", "a loop and change the brightness multiplier \"n\" # PWM.duty_u16([value])", "pin. Any settings that are not specified will remain in", "microcontroller running # Micropython. # lesson Written by @MarioTheMaker from", "pin is high compared with the length of a #", "the frequency of the toggling, and the duty cycle. #", "If additional arguments are given in the constructor then they", "range 0 to 65535 inclusive. n = 100 while n", "duty cycle is when the pin is high all of", "Raspberry Pi Pico microcontroller running # Micropython. # lesson Written", "the pin peripheral (GPIO pin) associated with the given id.", "5 #Turn all the lights off time.sleep(3) r.duty_u16(0) g.duty_u16(0) b.duty_u16(0)", "info https://docs.micropython.org/en/latest/library/machine.PWM.html # Start a loop and change the brightness", "value in the range 0 to 65535 inclusive. n =", "the LED lights and fan in a GBE growth chamber.", "when it is low all of the time. # https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#:", "= n - 5 #Turn all the lights off time.sleep(3)", "# associated with this: the frequency of the toggling, and", "n = n - 5 #Turn all the lights off", "PI PICO / MICROPYTHON # FAIRCHILD TROPICAL BOTANIC GARDEN, Oct", "low to high. There are two parameters # associated with", "can also control # accessories including a 12v water pump", "device is based on a Raspberry Pi Pico microcontroller running", "cycle. # The duty cycle is defined to be how", "fan in a GBE growth chamber. It can also control", "period (low plus high time). Maximum duty cycle is when", "will remain in their previous state. # More info https://docs.micropython.org/en/latest/library/machine.Pin.html", "with this: the frequency of the toggling, and the duty", "on a Raspberry Pi Pico microcontroller running # Micropython. #", "stdout, exit import machine import time #Set the brightness for", "control box is a device that controls # the LED", "is when it is low all of the time. #", "it is low all of the time. 
# https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#: #", "# Micropython. # lesson Written by @MarioTheMaker from sys import", "pin. # It achieves this by rapidly toggling the pin", "CONTROL BOX Traning # RASPBERRY PI PICO / MICROPYTHON #", "100 green_brightness = 100 blue_brightness = 100 white_brightness = 100", "# Start a loop and change the brightness multiplier \"n\"", "drive, alt) # Access the pin peripheral (GPIO pin) associated", "Red channel g=machine.PWM(machine.Pin(2)); g.freq(20000) # Green channel b=machine.PWM(machine.Pin(1)); b.freq(20000) #", "# PWM.duty_u16([value]) Get the current duty cycle of the PWM", "to high. There are two parameters # associated with this:", "100 blue_brightness = 100 white_brightness = 100 # Pulse width", "w.duty_u16(int(white_brightness)*n) time.sleep(.3) n = n - 5 #Turn all the", "https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#: # control I/O pins # machine.Pin(id, mode=- 1, pull=-", "specified will remain in their previous state. # More info", "and minimum is when it is low all of the", "the constructor then they are used to initialise # the", "of the PWM output, # as an unsigned 16-bit value", "MICROPYTHON # FAIRCHILD TROPICAL BOTANIC GARDEN, Oct 18, 2021 #", "BOX Traning # RASPBERRY PI PICO / MICROPYTHON # FAIRCHILD", "# Green channel b=machine.PWM(machine.Pin(1)); b.freq(20000) # Blue channel w=machine.PWM(machine.Pin(3)); w.freq(20000)", "output on a digital pin. # It achieves this by", "duty cycle of the PWM output, # as an unsigned", "remain in their previous state. # More info https://docs.micropython.org/en/latest/library/machine.Pin.html r=machine.PWM(machine.Pin(0));", "\"n\" # PWM.duty_u16([value]) Get the current duty cycle of the", "cycle is defined to be how long the pin is", "time.sleep(.3) n = n - 5 #Turn all the lights", "constructor then they are used to initialise # the pin.", "lights and fan in a GBE growth chamber. It can", "Oct 18, 2021 # The Growing Beyond Earth (GBE) control", "# Blue channel w=machine.PWM(machine.Pin(3)); w.freq(20000) # White channel # More", "the brightness multiplier \"n\" # PWM.duty_u16([value]) Get the current duty", "is when the pin is high all of the #", "# time, and minimum is when it is low all", "1, *, value, drive, alt) # Access the pin peripheral", "blue_brightness = 100 white_brightness = 100 # Pulse width modulation", "chamber. It can also control # accessories including a 12v", "current duty cycle of the PWM output, # as an", "Growing Beyond Earth (GBE) control box is a device that", "pump and environmental sensors. # The device is based on", "output, # as an unsigned 16-bit value in the range", "n = 100 while n > 0: print(\"Power Level \",n)", "is low all of the time. # https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#: # control", "More info https://docs.micropython.org/en/latest/library/machine.Pin.html r=machine.PWM(machine.Pin(0)); r.freq(20000) # Red channel g=machine.PWM(machine.Pin(2)); g.freq(20000)", "# the pin. Any settings that are not specified will", "this by rapidly toggling the pin from low to high.", "also control # accessories including a 12v water pump and", "a digital pin. # It achieves this by rapidly toggling", "# The device is based on a Raspberry Pi Pico", "# RASPBERRY PI PICO / MICROPYTHON # FAIRCHILD TROPICAL BOTANIC", "the given id. # If additional arguments are given in", "alt) # Access the pin peripheral (GPIO pin) associated with", "rapidly toggling the pin from low to high. 
There are", "(GPIO pin) associated with the given id. # If additional", "= 100 blue_brightness = 100 white_brightness = 100 # Pulse", "100 white_brightness = 100 # Pulse width modulation (PWM) is", "stdin, stdout, exit import machine import time #Set the brightness", "their previous state. # More info https://docs.micropython.org/en/latest/library/machine.Pin.html r=machine.PWM(machine.Pin(0)); r.freq(20000) #", "high all of the # time, and minimum is when", "of the toggling, and the duty cycle. # The duty", "running # Micropython. # lesson Written by @MarioTheMaker from sys", "b=machine.PWM(machine.Pin(1)); b.freq(20000) # Blue channel w=machine.PWM(machine.Pin(3)); w.freq(20000) # White channel", "the PWM output, # as an unsigned 16-bit value in", "/ MICROPYTHON # FAIRCHILD TROPICAL BOTANIC GARDEN, Oct 18, 2021", "#Set the brightness for each color red_brightness = 100 green_brightness", "Pulse width modulation (PWM) is a way to get an", "There are two parameters # associated with this: the frequency", "a 12v water pump and environmental sensors. # The device", "including a 12v water pump and environmental sensors. # The", "previous state. # More info https://docs.micropython.org/en/latest/library/machine.Pin.html r=machine.PWM(machine.Pin(0)); r.freq(20000) # Red", "the # time, and minimum is when it is low", "frequency of the toggling, and the duty cycle. # The", "Green channel b=machine.PWM(machine.Pin(1)); b.freq(20000) # Blue channel w=machine.PWM(machine.Pin(3)); w.freq(20000) #", "(PWM) is a way to get an artificial analog output", "the time. # https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#: # control I/O pins # machine.Pin(id,", "# accessories including a 12v water pump and environmental sensors.", "in a GBE growth chamber. It can also control #", "*, value, drive, alt) # Access the pin peripheral (GPIO", "Any settings that are not specified will remain in their", "0 to 65535 inclusive. n = 100 while n >", "compared with the length of a # single period (low", "the length of a # single period (low plus high", "0: print(\"Power Level \",n) r.duty_u16(int(red_brightness)*n) g.duty_u16(int(green_brightness)*n) b.duty_u16(int(blue_brightness)*n) w.duty_u16(int(white_brightness)*n) time.sleep(.3) n", "by @MarioTheMaker from sys import stdin, stdout, exit import machine", "pin from low to high. There are two parameters #", "an unsigned 16-bit value in the range 0 to 65535", "from low to high. There are two parameters # associated", "# single period (low plus high time). Maximum duty cycle", "# as an unsigned 16-bit value in the range 0", "exit import machine import time #Set the brightness for each", "green_brightness = 100 blue_brightness = 100 white_brightness = 100 #", "the pin. Any settings that are not specified will remain", "controls # the LED lights and fan in a GBE", "n > 0: print(\"Power Level \",n) r.duty_u16(int(red_brightness)*n) g.duty_u16(int(green_brightness)*n) b.duty_u16(int(blue_brightness)*n) w.duty_u16(int(white_brightness)*n)", "and environmental sensors. # The device is based on a", "low all of the time. 
# https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#: # control I/O", "is high all of the # time, and minimum is", "# It achieves this by rapidly toggling the pin from", "\",n) r.duty_u16(int(red_brightness)*n) g.duty_u16(int(green_brightness)*n) b.duty_u16(int(blue_brightness)*n) w.duty_u16(int(white_brightness)*n) time.sleep(.3) n = n -", "are two parameters # associated with this: the frequency of", "based on a Raspberry Pi Pico microcontroller running # Micropython.", "environmental sensors. # The device is based on a Raspberry", "are given in the constructor then they are used to", "w.freq(20000) # White channel # More info https://docs.micropython.org/en/latest/library/machine.PWM.html # Start", "a GBE growth chamber. It can also control # accessories", "brightness multiplier \"n\" # PWM.duty_u16([value]) Get the current duty cycle", "FAIRCHILD TROPICAL BOTANIC GARDEN, Oct 18, 2021 # The Growing", "Beyond Earth (GBE) control box is a device that controls", "= 100 # Pulse width modulation (PWM) is a way", "# https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#: # control I/O pins # machine.Pin(id, mode=- 1,", "and the duty cycle. # The duty cycle is defined" ]
[ "KNOWN_BUGS = {MASTER_YAML_KNOWN_BUGS_KEY: []} class BugSearchDef(SearchDef): def __init__(self, pattern, bug_id,", "master yaml. Note that this can only be called once", "[] for idx in self.reason_format_result_groups: values.append(search_result.get(idx)) return self._reason.format(*values) return self._reason", "a final part after all others have executed. \"\"\" bugs", "it exists and add new bug with description of the", "has been flagged. This string can be a template i.e.", "others have executed. \"\"\" bugs = _get_known_bugs() if bugs and", "bug with description of the bug. \"\"\" if not os.path.isdir(constants.PLUGIN_TMP_DIR):", "i.e. containing {} fields that can be rendered using results.", "if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY): return bugs return {} def add_known_bug(bug_id,", "with description of the bug. \"\"\" if not os.path.isdir(constants.PLUGIN_TMP_DIR): raise", "it exists and return its contents or None if it", "reason string is a template, this is a list of", "description=None, type=LAUNCHPAD): \"\"\" Fetch the current plugin known_bugs.yaml if it", "all others have executed. \"\"\" bugs = _get_known_bugs() if bugs", "current plugin known_bugs.yaml if it exists and return its contents", "for inclusion in the reason. \"\"\" super().__init__(pattern, tag=bug_id, hint=hint) self._reason", "dir '{}' not found\". format(constants.PLUGIN_TMP_DIR)) if type == LAUNCHPAD: new_bug", "__init__(self, pattern, bug_id, hint, reason, reason_format_result_groups=None): \"\"\" @param reason: string", "import constants from core.searchtools import SearchDef from core.issues.issue_utils import IssueEntry", "string can be a template i.e. containing {} fields that", "<gh_stars>0 import os import yaml from core import plugintools from", "the current plugin known_bugs.yaml if it exists and return its", "def add_known_bugs_to_master_plugin(): \"\"\" Fetch the current plugin known_bugs.yaml and add", "executed. \"\"\" bugs = _get_known_bugs() if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY): plugintools.save_part(bugs,", "@param reason: string reason describing the issue and why it", "constants from core.searchtools import SearchDef from core.issues.issue_utils import IssueEntry LAUNCHPAD", "doesn't exist yet. \"\"\" if not os.path.isdir(constants.PLUGIN_TMP_DIR): raise Exception(\"plugin tmp", "if it exists and add new bug with description of", "LAUNCHPAD = \"launchpad\" MASTER_YAML_KNOWN_BUGS_KEY = \"bugs-detected\" KNOWN_BUGS = {MASTER_YAML_KNOWN_BUGS_KEY: []}", "{MASTER_YAML_KNOWN_BUGS_KEY: [entry.data]} known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR, \"known_bugs.yaml\") with open(known_bugs_yaml, 'w') as", "plugin known_bugs.yaml if it exists and add new bug with", "template, this is a list of indexes in the results", "type=LAUNCHPAD): \"\"\" Fetch the current plugin known_bugs.yaml if it exists", "as a final part after all others have executed. \"\"\"", "final part after all others have executed. \"\"\" bugs =", "description, key=\"id\") current = _get_known_bugs() if current and current.get(MASTER_YAML_KNOWN_BUGS_KEY): current[MASTER_YAML_KNOWN_BUGS_KEY].append(entry.data)", "{MASTER_YAML_KNOWN_BUGS_KEY: []} class BugSearchDef(SearchDef): def __init__(self, pattern, bug_id, hint, reason,", "reason. 
\"\"\" super().__init__(pattern, tag=bug_id, hint=hint) self._reason = reason if reason", "plugintools from core import constants from core.searchtools import SearchDef from", "\"\"\" Fetch the current plugin known_bugs.yaml if it exists and", "= os.path.join(constants.PLUGIN_TMP_DIR, \"known_bugs.yaml\") if not os.path.exists(known_bugs_yaml): return {} bugs =", "template i.e. containing {} fields that can be rendered using", "this can only be called once per plugin and is", "called once per plugin and is typically performed as a", "raise Exception(\"plugin tmp dir '{}' not found\". format(constants.PLUGIN_TMP_DIR)) known_bugs_yaml =", "Note that this can only be called once per plugin", "import os import yaml from core import plugintools from core", "self._reason = \"\" self.reason_format_result_groups = reason_format_result_groups @property def reason(self): return", "def add_known_bug(bug_id, description=None, type=LAUNCHPAD): \"\"\" Fetch the current plugin known_bugs.yaml", "current plugin known_bugs.yaml if it exists and add new bug", "the issue and why it has been flagged. This string", "yaml. Note that this can only be called once per", "= \"https://bugs.launchpad.net/bugs/{}\".format(bug_id) if description is None: description = \"no description", "new bug with description of the bug. \"\"\" if not", "have executed. \"\"\" bugs = _get_known_bugs() if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY):", "\"\"\" Fetch the current plugin known_bugs.yaml and add it to", "results. @param reason_format_result_groups: if the reason string is a template,", "part after all others have executed. \"\"\" bugs = _get_known_bugs()", "that this can only be called once per plugin and", "is a list of indexes in the results that can", "return {} bugs = yaml.safe_load(open(known_bugs_yaml)) if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY): return", "current and current.get(MASTER_YAML_KNOWN_BUGS_KEY): current[MASTER_YAML_KNOWN_BUGS_KEY].append(entry.data) else: current = {MASTER_YAML_KNOWN_BUGS_KEY: [entry.data]} known_bugs_yaml", "it to the master yaml. Note that this can only", "= yaml.safe_load(open(known_bugs_yaml)) if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY): return bugs return {}", "open(known_bugs_yaml, 'w') as fd: fd.write(yaml.dump(current)) def add_known_bugs_to_master_plugin(): \"\"\" Fetch the", "hint=hint) self._reason = reason if reason is None: self._reason =", "= \"no description provided\" entry = IssueEntry(new_bug, description, key=\"id\") current", "'{}' not found\". format(constants.PLUGIN_TMP_DIR)) if type == LAUNCHPAD: new_bug =", "why it has been flagged. 
This string can be a", "format(constants.PLUGIN_TMP_DIR)) if type == LAUNCHPAD: new_bug = \"https://bugs.launchpad.net/bugs/{}\".format(bug_id) if description", "super().__init__(pattern, tag=bug_id, hint=hint) self._reason = reason if reason is None:", "type == LAUNCHPAD: new_bug = \"https://bugs.launchpad.net/bugs/{}\".format(bug_id) if description is None:", "if it exists and return its contents or None if", "== LAUNCHPAD: new_bug = \"https://bugs.launchpad.net/bugs/{}\".format(bug_id) if description is None: description", "from core.searchtools import SearchDef from core.issues.issue_utils import IssueEntry LAUNCHPAD =", "return self._reason def _get_known_bugs(): \"\"\" Fetch the current plugin known_bugs.yaml", "'w') as fd: fd.write(yaml.dump(current)) def add_known_bugs_to_master_plugin(): \"\"\" Fetch the current", "bugs.get(MASTER_YAML_KNOWN_BUGS_KEY): return bugs return {} def add_known_bug(bug_id, description=None, type=LAUNCHPAD): \"\"\"", "Fetch the current plugin known_bugs.yaml if it exists and return", "core.issues.issue_utils import IssueEntry LAUNCHPAD = \"launchpad\" MASTER_YAML_KNOWN_BUGS_KEY = \"bugs-detected\" KNOWN_BUGS", "pattern, bug_id, hint, reason, reason_format_result_groups=None): \"\"\" @param reason: string reason", "and why it has been flagged. This string can be", "the current plugin known_bugs.yaml if it exists and add new", "class BugSearchDef(SearchDef): def __init__(self, pattern, bug_id, hint, reason, reason_format_result_groups=None): \"\"\"", "self.reason_format_result_groups: values = [] for idx in self.reason_format_result_groups: values.append(search_result.get(idx)) return", "[entry.data]} known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR, \"known_bugs.yaml\") with open(known_bugs_yaml, 'w') as fd:", "from core.issues.issue_utils import IssueEntry LAUNCHPAD = \"launchpad\" MASTER_YAML_KNOWN_BUGS_KEY = \"bugs-detected\"", "return bugs return {} def add_known_bug(bug_id, description=None, type=LAUNCHPAD): \"\"\" Fetch", "fd: fd.write(yaml.dump(current)) def add_known_bugs_to_master_plugin(): \"\"\" Fetch the current plugin known_bugs.yaml", "current plugin known_bugs.yaml and add it to the master yaml.", "\"\"\" bugs = _get_known_bugs() if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY): plugintools.save_part(bugs, priority=99)", "self._reason.format(*values) return self._reason def _get_known_bugs(): \"\"\" Fetch the current plugin", "if not os.path.isdir(constants.PLUGIN_TMP_DIR): raise Exception(\"plugin tmp dir '{}' not found\".", "= \"bugs-detected\" KNOWN_BUGS = {MASTER_YAML_KNOWN_BUGS_KEY: []} class BugSearchDef(SearchDef): def __init__(self,", "_get_known_bugs(): \"\"\" Fetch the current plugin known_bugs.yaml if it exists", "\"known_bugs.yaml\") with open(known_bugs_yaml, 'w') as fd: fd.write(yaml.dump(current)) def add_known_bugs_to_master_plugin(): \"\"\"", "reason is None: self._reason = \"\" self.reason_format_result_groups = reason_format_result_groups @property", "os.path.isdir(constants.PLUGIN_TMP_DIR): raise Exception(\"plugin tmp dir '{}' not found\". format(constants.PLUGIN_TMP_DIR)) known_bugs_yaml", "\"\"\" @param reason: string reason describing the issue and why", "can be rendered using results. @param reason_format_result_groups: if the reason", "reason_format_result_groups: if the reason string is a template, this is", "yet. 
\"\"\" if not os.path.isdir(constants.PLUGIN_TMP_DIR): raise Exception(\"plugin tmp dir '{}'", "this is a list of indexes in the results that", "description provided\" entry = IssueEntry(new_bug, description, key=\"id\") current = _get_known_bugs()", "self._reason def rendered_reason(self, search_result): if self._reason and self.reason_format_result_groups: values =", "string reason describing the issue and why it has been", "self.reason_format_result_groups: values.append(search_result.get(idx)) return self._reason.format(*values) return self._reason def _get_known_bugs(): \"\"\" Fetch", "and self.reason_format_result_groups: values = [] for idx in self.reason_format_result_groups: values.append(search_result.get(idx))", "None: self._reason = \"\" self.reason_format_result_groups = reason_format_result_groups @property def reason(self):", "if not os.path.exists(known_bugs_yaml): return {} bugs = yaml.safe_load(open(known_bugs_yaml)) if bugs", "reason(self): return self._reason def rendered_reason(self, search_result): if self._reason and self.reason_format_result_groups:", "from core import constants from core.searchtools import SearchDef from core.issues.issue_utils", "list of indexes in the results that can be extracted", "import yaml from core import plugintools from core import constants", "self._reason def _get_known_bugs(): \"\"\" Fetch the current plugin known_bugs.yaml if", "import SearchDef from core.issues.issue_utils import IssueEntry LAUNCHPAD = \"launchpad\" MASTER_YAML_KNOWN_BUGS_KEY", "indexes in the results that can be extracted for inclusion", "'{}' not found\". format(constants.PLUGIN_TMP_DIR)) known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR, \"known_bugs.yaml\") if not", "as fd: fd.write(yaml.dump(current)) def add_known_bugs_to_master_plugin(): \"\"\" Fetch the current plugin", "can be a template i.e. containing {} fields that can", "reason_format_result_groups @property def reason(self): return self._reason def rendered_reason(self, search_result): if", "from core import plugintools from core import constants from core.searchtools", "return self._reason.format(*values) return self._reason def _get_known_bugs(): \"\"\" Fetch the current", "of the bug. \"\"\" if not os.path.isdir(constants.PLUGIN_TMP_DIR): raise Exception(\"plugin tmp", "tag=bug_id, hint=hint) self._reason = reason if reason is None: self._reason", "= reason_format_result_groups @property def reason(self): return self._reason def rendered_reason(self, search_result):", "self._reason and self.reason_format_result_groups: values = [] for idx in self.reason_format_result_groups:", "plugin and is typically performed as a final part after", "return self._reason def rendered_reason(self, search_result): if self._reason and self.reason_format_result_groups: values", "reason: string reason describing the issue and why it has", "if description is None: description = \"no description provided\" entry", "the results that can be extracted for inclusion in the", "that can be extracted for inclusion in the reason. 
\"\"\"", "if reason is None: self._reason = \"\" self.reason_format_result_groups = reason_format_result_groups", "yaml from core import plugintools from core import constants from", "if the reason string is a template, this is a", "provided\" entry = IssueEntry(new_bug, description, key=\"id\") current = _get_known_bugs() if", "not os.path.exists(known_bugs_yaml): return {} bugs = yaml.safe_load(open(known_bugs_yaml)) if bugs and", "self.reason_format_result_groups = reason_format_result_groups @property def reason(self): return self._reason def rendered_reason(self,", "current[MASTER_YAML_KNOWN_BUGS_KEY].append(entry.data) else: current = {MASTER_YAML_KNOWN_BUGS_KEY: [entry.data]} known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR, \"known_bugs.yaml\")", "bug_id, hint, reason, reason_format_result_groups=None): \"\"\" @param reason: string reason describing", "a template i.e. containing {} fields that can be rendered", "os import yaml from core import plugintools from core import", "reason, reason_format_result_groups=None): \"\"\" @param reason: string reason describing the issue", "IssueEntry(new_bug, description, key=\"id\") current = _get_known_bugs() if current and current.get(MASTER_YAML_KNOWN_BUGS_KEY):", "a template, this is a list of indexes in the", "add new bug with description of the bug. \"\"\" if", "key=\"id\") current = _get_known_bugs() if current and current.get(MASTER_YAML_KNOWN_BUGS_KEY): current[MASTER_YAML_KNOWN_BUGS_KEY].append(entry.data) else:", "and current.get(MASTER_YAML_KNOWN_BUGS_KEY): current[MASTER_YAML_KNOWN_BUGS_KEY].append(entry.data) else: current = {MASTER_YAML_KNOWN_BUGS_KEY: [entry.data]} known_bugs_yaml =", "current.get(MASTER_YAML_KNOWN_BUGS_KEY): current[MASTER_YAML_KNOWN_BUGS_KEY].append(entry.data) else: current = {MASTER_YAML_KNOWN_BUGS_KEY: [entry.data]} known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR,", "and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY): return bugs return {} def add_known_bug(bug_id, description=None, type=LAUNCHPAD):", "using results. @param reason_format_result_groups: if the reason string is a", "import IssueEntry LAUNCHPAD = \"launchpad\" MASTER_YAML_KNOWN_BUGS_KEY = \"bugs-detected\" KNOWN_BUGS =", "a list of indexes in the results that can be", "if it doesn't exist yet. \"\"\" if not os.path.isdir(constants.PLUGIN_TMP_DIR): raise", "that can be rendered using results. @param reason_format_result_groups: if the", "SearchDef from core.issues.issue_utils import IssueEntry LAUNCHPAD = \"launchpad\" MASTER_YAML_KNOWN_BUGS_KEY =", "with open(known_bugs_yaml, 'w') as fd: fd.write(yaml.dump(current)) def add_known_bugs_to_master_plugin(): \"\"\" Fetch", "its contents or None if it doesn't exist yet. \"\"\"", "exists and return its contents or None if it doesn't", "IssueEntry LAUNCHPAD = \"launchpad\" MASTER_YAML_KNOWN_BUGS_KEY = \"bugs-detected\" KNOWN_BUGS = {MASTER_YAML_KNOWN_BUGS_KEY:", "dir '{}' not found\". format(constants.PLUGIN_TMP_DIR)) known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR, \"known_bugs.yaml\") if", "hint, reason, reason_format_result_groups=None): \"\"\" @param reason: string reason describing the", "self._reason = reason if reason is None: self._reason = \"\"", "{} bugs = yaml.safe_load(open(known_bugs_yaml)) if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY): return bugs", "found\". 
import os

import yaml

from core import plugintools
from core import constants
from core.searchtools import SearchDef
from core.issues.issue_utils import IssueEntry

LAUNCHPAD = "launchpad"
MASTER_YAML_KNOWN_BUGS_KEY = "bugs-detected"
KNOWN_BUGS = {MASTER_YAML_KNOWN_BUGS_KEY: []}


class BugSearchDef(SearchDef):
    def __init__(self, pattern, bug_id, hint, reason,
                 reason_format_result_groups=None):
        """
        @param reason: string reason describing the issue and why it has
        been flagged. This string can be a template i.e. containing {}
        fields that can be rendered using results.
        @param reason_format_result_groups: if the reason string is a
        template, this is a list of indexes in the results that can be
        extracted for inclusion in the reason.
        """
        super().__init__(pattern, tag=bug_id, hint=hint)
        self._reason = reason
        if reason is None:
            self._reason = ""

        self.reason_format_result_groups = reason_format_result_groups

    @property
    def reason(self):
        return self._reason

    def rendered_reason(self, search_result):
        """Substitute the configured result groups into the reason
        template, or return the reason verbatim if it is not a template.
        """
        if self._reason and self.reason_format_result_groups:
            values = []
            for idx in self.reason_format_result_groups:
                values.append(search_result.get(idx))

            return self._reason.format(*values)

        return self._reason


def _get_known_bugs():
    """
    Fetch the current plugin known_bugs.yaml if it exists and return its
    contents, or an empty dict if it doesn't exist yet.
    """
    if not os.path.isdir(constants.PLUGIN_TMP_DIR):
        raise Exception("plugin tmp dir '{}' not found".
                        format(constants.PLUGIN_TMP_DIR))

    known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR,
                                   "known_bugs.yaml")
    if not os.path.exists(known_bugs_yaml):
        return {}

    with open(known_bugs_yaml) as fd:
        bugs = yaml.safe_load(fd)

    if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY):
        return bugs

    return {}


def add_known_bug(bug_id, description=None, type=LAUNCHPAD):
    """
    Fetch the current plugin known_bugs.yaml if it exists and add a new
    bug with a description of the bug.
    """
    if not os.path.isdir(constants.PLUGIN_TMP_DIR):
        raise Exception("plugin tmp dir '{}' not found".
                        format(constants.PLUGIN_TMP_DIR))

    if type == LAUNCHPAD:
        new_bug = "https://bugs.launchpad.net/bugs/{}".format(bug_id)

    if description is None:
        description = "no description provided"

    entry = IssueEntry(new_bug, description, key="id")
    current = _get_known_bugs()
    if current and current.get(MASTER_YAML_KNOWN_BUGS_KEY):
        current[MASTER_YAML_KNOWN_BUGS_KEY].append(entry.data)
    else:
        current = {MASTER_YAML_KNOWN_BUGS_KEY: [entry.data]}

    known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR,
                                   "known_bugs.yaml")
    with open(known_bugs_yaml, 'w') as fd:
        fd.write(yaml.dump(current))


def add_known_bugs_to_master_plugin():
    """
    Fetch the current plugin known_bugs.yaml and add it to the master
    yaml. Note that this can only be called once per plugin and is
    typically performed as a final part after all others have executed.
    """
    bugs = _get_known_bugs()
[ "echoserver(xmls.SimpleXMLRPCServer): allow_reuse_address = True server = echoserver(('127.0.0.1', 8001)) server.register_function(echo, 'echo')", "SimpleXMLRPCServer as xmls def echo(msg): print 'Got', msg return msg", "msg return msg class echoserver(xmls.SimpleXMLRPCServer): allow_reuse_address = True server =", "8001)) server.register_function(echo, 'echo') print 'Listening on port 8001' try: server.serve_forever()", "= echoserver(('127.0.0.1', 8001)) server.register_function(echo, 'echo') print 'Listening on port 8001'", "= True server = echoserver(('127.0.0.1', 8001)) server.register_function(echo, 'echo') print 'Listening", "print 'Got', msg return msg class echoserver(xmls.SimpleXMLRPCServer): allow_reuse_address = True", "echoserver(('127.0.0.1', 8001)) server.register_function(echo, 'echo') print 'Listening on port 8001' try:", "True server = echoserver(('127.0.0.1', 8001)) server.register_function(echo, 'echo') print 'Listening on", "server.register_function(echo, 'echo') print 'Listening on port 8001' try: server.serve_forever() except:", "'Got', msg return msg class echoserver(xmls.SimpleXMLRPCServer): allow_reuse_address = True server", "allow_reuse_address = True server = echoserver(('127.0.0.1', 8001)) server.register_function(echo, 'echo') print", "server = echoserver(('127.0.0.1', 8001)) server.register_function(echo, 'echo') print 'Listening on port", "class echoserver(xmls.SimpleXMLRPCServer): allow_reuse_address = True server = echoserver(('127.0.0.1', 8001)) server.register_function(echo,", "def echo(msg): print 'Got', msg return msg class echoserver(xmls.SimpleXMLRPCServer): allow_reuse_address", "import SimpleXMLRPCServer as xmls def echo(msg): print 'Got', msg return", "xmls def echo(msg): print 'Got', msg return msg class echoserver(xmls.SimpleXMLRPCServer):", "return msg class echoserver(xmls.SimpleXMLRPCServer): allow_reuse_address = True server = echoserver(('127.0.0.1',", "echo(msg): print 'Got', msg return msg class echoserver(xmls.SimpleXMLRPCServer): allow_reuse_address =", "'echo') print 'Listening on port 8001' try: server.serve_forever() except: server.server_close()", "msg class echoserver(xmls.SimpleXMLRPCServer): allow_reuse_address = True server = echoserver(('127.0.0.1', 8001))", "as xmls def echo(msg): print 'Got', msg return msg class" ]
[ "224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def testDivisibleBy(self):", "for mobilenet_v2.\"\"\" from __future__ import absolute_import from __future__ import division", "num_convs = len(find_ops('Conv2D')) # This is mostly a sanity test.", "2.0 (the \"License\"); # you may not use this file", "tf.reset_default_graph() # Verifies that depth_multiplier arg scope actually works #", "_ = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec) num_convs", "net, ep = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec,", "2) def testWithSplits(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF) spec['overrides'] = { (ops.expanded_conv,):", "tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])", "sc = mobilenet.training_scope(is_training=True) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope() self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])", "(10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])", "from __future__ import division from __future__ import print_function import copy", "\"\"\"Tests for mobilenet_v2.\"\"\" from __future__ import absolute_import from __future__ import", "for op in find_ops('Conv2D')] s = set(s) self.assertSameElements([32, 64, 96,", "print_function import copy import tensorflow as tf from nets.mobilenet import", "extra conv that is not in the spec. (logits) self.assertEqual(num_convs,", "3))) self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3], [output_size] * 2) def testWithSplits(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF)", "in find_ops('Conv2D')] s = set(s) self.assertSameElements(s, [32, 192, 128, 1001])", "== optype] class MobilenetV2Test(tf.test.TestCase): def setUp(self): tf.reset_default_graph() def testCreation(self): spec", "[output_size] * 2) def testWithSplits(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF) spec['overrides'] =", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "- 5) def testWithOutputStride8(self): out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10,", "self.assertEqual(num_convs, len(spec['spec']) * 3 - 5) def testWithOutputStride8(self): out, _", "{ (ops.expanded_conv,): dict(split_expansion=2), } _, _ = mobilenet.mobilenet( tf.placeholder(tf.float32, (10,", "the spec. (logits) self.assertEqual(num_convs, len(spec['spec']) * 2 - 2) #", "- 2) # Check that depthwise are exposed. 
for i", "(10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, divisible_by=16, min_depth=32) s = [op.outputs[0].get_shape().as_list()[-1]", "1280, 1001], s) def testDivisibleByWithArgScope(self): tf.reset_default_graph() # Verifies that depth_multiplier", "spec = dict(mobilenet_v2.V2_DEF) _, ep = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224,", "# All convolutions will be 8->48, except for the last", "depth_multiplier=0.01, finegrain_classification_mode=True) s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s", "* 3 - 5) def testWithOutputStride8(self): out, _ = mobilenet.mobilenet_base(", "depth_multiplier=0.1) self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128]) def testWithOutputStride16(self): tf.reset_default_graph() out,", "scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def testDivisibleBy(self): tf.reset_default_graph() mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10,", "self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128]) def testWithOutputStride16(self): tf.reset_default_graph() out, _", "= set(s) self.assertSameElements(s, [32, 192, 128, 1001]) def testFineGrained(self): tf.reset_default_graph()", "= mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec) num_convs =", "[32, 192, 128, 1001]) def testFineGrained(self): tf.reset_default_graph() # Verifies that", "mobilenet_v2.\"\"\" from __future__ import absolute_import from __future__ import division from", "tensorflow as tf from nets.mobilenet import conv_blocks as ops from", "16)), conv_defs=spec) num_convs = len(find_ops('Conv2D')) # This is mostly a", "48, 1001, 1280]) def testMobilenetBase(self): tf.reset_default_graph() # Verifies that mobilenet_base", "min_depth is provided. with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224,", "use this file except in compliance with the License. #", "import tensorflow as tf from nets.mobilenet import conv_blocks as ops", "self.assertIn('layer_%d/depthwise_output' % i, ep) def testCreationNoClasses(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF) net,", "copy.deepcopy(mobilenet_v2.V2_DEF) spec['overrides'] = { (ops.expanded_conv,): dict(split_expansion=2), } _, _ =", "not in the spec. (logits) self.assertEqual(num_convs, len(spec['spec']) * 2 -", "for op in find_ops('Conv2D')] s = set(s) self.assertSameElements(s, [32, 192,", "type in graphdef or a graph. Args: optype: operation type", "spec = copy.deepcopy(mobilenet_v2.V2_DEF) net, ep = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224,", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "tf.reset_default_graph() mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, divisible_by=16, min_depth=32)", "License. # You may obtain a copy of the License", "set(s) # All convolutions will be 8->48, except for the", "The TensorFlow Authors. All Rights Reserved. # # Licensed under", "[var for var in gd.get_operations() if var.type == optype] class", "3 have one # and there is one unaccounted. self.assertEqual(num_convs,", "tf.get_default_graph() return [var for var in gd.get_operations() if var.type ==", "6), (160, 5), (128, 4), (96, 3)]: tf.reset_default_graph() _, ep", "that is not in the spec. 
(logits) self.assertEqual(num_convs, len(spec['spec']) *", "224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def testWithOutputStride8AndExplicitPadding(self):", "under the License is distributed on an \"AS IS\" BASIS,", "License for the specific language governing permissions and # limitations", "mobilenet_v2.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) self.assertEqual(net.get_shape().as_list(), [10,", "these particular # constants. # # All but first 2", "copy.deepcopy(mobilenet_v2.V2_DEF) net, ep = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)),", "(192, 6), (160, 5), (128, 4), (96, 3)]: tf.reset_default_graph() _,", "divisible_by=16, min_depth=32) s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s", "have one # and there is one unaccounted. self.assertEqual(num_convs, len(spec['spec'])", "Reserved. # # Licensed under the Apache License, Version 2.0", "# Verifies that depth_multiplier arg scope actually works # if", "provided. mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01, finegrain_classification_mode=True)", "governing permissions and # limitations under the License. # ==============================================================================", "Verifies that depth_multiplier arg scope actually works # if no", "that depth_multiplier arg scope actually works # if no default", "tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec, num_classes=None) self.assertIs(net, ep['global_pool']) def", "TensorFlow Authors. All Rights Reserved. # # Licensed under the", "tf.placeholder(tf.float32, (10, input_size, input_size, 3))) self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3], [output_size] * 2) def", "nets.mobilenet import mobilenet from nets.mobilenet import mobilenet_v2 slim = tf.contrib.slim", "224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01, finegrain_classification_mode=True) s = [op.outputs[0].get_shape().as_list()[-1] for", "mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, use_explicit_padding=True, scope='MobilenetV2')", "no default min_depth is provided. mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224,", "= [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s = set(s) #", "s = set(s) # All convolutions will be 8->48, except", "mostly a sanity test. No deep reason for these particular", "from __future__ import absolute_import from __future__ import division from __future__", "sc = mobilenet.training_scope(is_training=False) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope(is_training=True) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])", "set(s) self.assertSameElements([32, 64, 96, 160, 192, 320, 384, 576, 960,", "in compliance with the License. # You may obtain a", "software # distributed under the License is distributed on an", "test. No deep reason for these particular # constants. 
#", "128]) def testWithOutputStride16(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10,", "= mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, scope='MobilenetV2')", "All but first 2 and last one have two convolutions,", "def testDivisibleByWithArgScope(self): tf.reset_default_graph() # Verifies that depth_multiplier arg scope actually", "def setUp(self): tf.reset_default_graph() def testCreation(self): spec = dict(mobilenet_v2.V2_DEF) _, ep", "__future__ import division from __future__ import print_function import copy import", "output_stride=8, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def testDivisibleBy(self): tf.reset_default_graph() mobilenet_v2.mobilenet( tf.placeholder(tf.float32,", "_ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16,", "two convolutions, and there is one # extra conv that", "ep = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec) num_convs", "conv_defs=spec, num_classes=None) self.assertIs(net, ep['global_pool']) def testImageSizes(self): for input_size, output_size in", "is mostly a sanity test. No deep reason for these", "# if no default min_depth is provided. with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):", "is one unaccounted. self.assertEqual(num_convs, len(spec['spec']) * 3 - 5) def", "actually works # if no default min_depth is provided. with", "testWithOutputStride8(self): out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)),", "conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s", "default min_depth is provided. with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10,", "there is one # extra conv that is not in", "ep['global_pool']) def testImageSizes(self): for input_size, output_size in [(224, 7), (192,", "(128, 4), (96, 3)]: tf.reset_default_graph() _, ep = mobilenet_v2.mobilenet( tf.placeholder(tf.float32,", "of a given type in graphdef or a graph. Args:", "unaccounted. self.assertEqual(num_convs, len(spec['spec']) * 3 - 5) def testWithOutputStride8(self): out,", "self.assertNotIn('is_training', sc[slim.arg_scope_func_key( slim.batch_norm)]) def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self): sc = mobilenet.training_scope(is_training=False) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])", "for input_size, output_size in [(224, 7), (192, 6), (160, 5),", "graphdef or a graph. Args: optype: operation type (e.g. Conv2D)", "14]) def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self): sc = mobilenet.training_scope(is_training=None) self.assertNotIn('is_training', sc[slim.arg_scope_func_key( slim.batch_norm)]) def", "224, 16)), conv_defs=spec, num_classes=None) self.assertIs(net, ep['global_pool']) def testImageSizes(self): for input_size,", "ops of a given type in graphdef or a graph.", "spec['overrides'] = { (ops.expanded_conv,): dict(split_expansion=2), } _, _ = mobilenet.mobilenet(", "from nets.mobilenet import mobilenet_v2 slim = tf.contrib.slim def find_ops(optype): \"\"\"Find", "def testImageSizes(self): for input_size, output_size in [(224, 7), (192, 6),", "OF ANY KIND, either express or implied. 
# See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "# and there is one unaccounted. self.assertEqual(num_convs, len(spec['spec']) * 3", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF,", "16)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128]) def testWithOutputStride16(self):", "= mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16, use_explicit_padding=True)", "28]) def testDivisibleBy(self): tf.reset_default_graph() mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)),", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "testWithOutputStride16AndExplicitPadding(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224,", "to in writing, software # distributed under the License is", "one have two convolutions, and there is one # extra", "224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, use_explicit_padding=True, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def", "a graph. Args: optype: operation type (e.g. Conv2D) Returns: List", "last one have two convolutions, and there is one #", "# See the License for the specific language governing permissions", "one unaccounted. self.assertEqual(num_convs, len(spec['spec']) * 3 - 5) def testWithOutputStride8(self):", "convolutions will be 8->48, except for the last one. self.assertSameElements(s,", "= mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, use_explicit_padding=True,", "Args: optype: operation type (e.g. Conv2D) Returns: List of operations.", "mobilenet_v2 slim = tf.contrib.slim def find_ops(optype): \"\"\"Find ops of a", "given type in graphdef or a graph. Args: optype: operation", "use_explicit_padding=True, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def testWithOutputStride16AndExplicitPadding(self): tf.reset_default_graph() out, _", "(10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, use_explicit_padding=True, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28,", "language governing permissions and # limitations under the License. #", "160, 192, 320, 384, 576, 960, 1280, 1001], s) def", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "_, ep = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# extra conv that is not in the spec. (logits)", "with the License. # You may obtain a copy of", "len(spec['spec']) * 2 - 2) # Check that depthwise are", "constants. # # All but first 2 and last one", "exposed. for i in range(2, 17): self.assertIn('layer_%d/depthwise_output' % i, ep)", "graph. Args: optype: operation type (e.g. 
Conv2D) Returns: List of", "find_ops('Conv2D')] s = set(s) self.assertSameElements(s, [32, 192, 128, 1001]) def", "def testMobilenetBase(self): tf.reset_default_graph() # Verifies that mobilenet_base returns pre-pooling layer.", "testDivisibleByWithArgScope(self): tf.reset_default_graph() # Verifies that depth_multiplier arg scope actually works", "scope actually works # if no default min_depth is provided.", "testDivisibleBy(self): tf.reset_default_graph() mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, divisible_by=16,", "arg scope actually works # if no default min_depth is", "compliance with the License. # You may obtain a copy", "All Rights Reserved. # # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "(10, input_size, input_size, 3))) self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3], [output_size] * 2) def testWithSplits(self):", "distributed under the License is distributed on an \"AS IS\"", "[10, 7, 7, 128]) def testWithOutputStride16(self): tf.reset_default_graph() out, _ =", "mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01, finegrain_classification_mode=True) s", "for op in find_ops('Conv2D')] s = set(s) # All convolutions", "slim.batch_norm)]) def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self): sc = mobilenet.training_scope(is_training=False) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc =", "one # and there is one unaccounted. self.assertEqual(num_convs, len(spec['spec']) *", "conv_defs=mobilenet_v2.V2_DEF, output_stride=8, use_explicit_padding=True, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def testWithOutputStride16AndExplicitPadding(self): tf.reset_default_graph()", "def testFineGrained(self): tf.reset_default_graph() # Verifies that depth_multiplier arg scope actually", "there is one unaccounted. self.assertEqual(num_convs, len(spec['spec']) * 3 - 5)", "express or implied. # See the License for the specific", "depth_multiplier=0.1) s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s =", "except in compliance with the License. # You may obtain", "tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, divisible_by=16, min_depth=32) s =", "limitations under the License. # ============================================================================== \"\"\"Tests for mobilenet_v2.\"\"\" from", "output_stride=16, use_explicit_padding=True) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self): sc = mobilenet.training_scope(is_training=None)", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "a sanity test. 
No deep reason for these particular #", "tf.placeholder(tf.float32, (10, 224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01, finegrain_classification_mode=True) s =", "not use this file except in compliance with the License.", "operatore, the remainign 3 have one # and there is", "import mobilenet from nets.mobilenet import mobilenet_v2 slim = tf.contrib.slim def", "var.type == optype] class MobilenetV2Test(tf.test.TestCase): def setUp(self): tf.reset_default_graph() def testCreation(self):", "in [(224, 7), (192, 6), (160, 5), (128, 4), (96,", "writing, software # distributed under the License is distributed on", "you may not use this file except in compliance with", "2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01, finegrain_classification_mode=True) s = [op.outputs[0].get_shape().as_list()[-1] for op in", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "output_size in [(224, 7), (192, 6), (160, 5), (128, 4),", "(10, 224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) s = [op.outputs[0].get_shape().as_list()[-1] for", "============================================================================== \"\"\"Tests for mobilenet_v2.\"\"\" from __future__ import absolute_import from __future__", "testWithOutputStride8AndExplicitPadding(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224,", "operations. \"\"\" gd = tf.get_default_graph() return [var for var in", "mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16) self.assertEqual(out.get_shape().as_list()[1:3], [14,", "List of operations. \"\"\" gd = tf.get_default_graph() return [var for", "testMobilenetBase(self): tf.reset_default_graph() # Verifies that mobilenet_base returns pre-pooling layer. with", "} _, _ = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)),", "spec. (logits) self.assertEqual(num_convs, len(spec['spec']) * 2 - 2) # Check", "CONDITIONS OF ANY KIND, either express or implied. # See", "type (e.g. Conv2D) Returns: List of operations. \"\"\" gd =", "that mobilenet_base returns pre-pooling layer. with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): net, _", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "but first 2 and last one have two convolutions, and", "MobilenetV2Test(tf.test.TestCase): def setUp(self): tf.reset_default_graph() def testCreation(self): spec = dict(mobilenet_v2.V2_DEF) _,", "224, 224, 16)), conv_defs=spec) num_convs = len(find_ops('Conv2D')) # This is", "This is mostly a sanity test. No deep reason for", "permissions and # limitations under the License. # ============================================================================== \"\"\"Tests", "testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self): sc = mobilenet.training_scope(is_training=False) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope(is_training=True) self.assertIn('is_training',", "for the last one. 
self.assertSameElements(s, [8, 48, 1001, 1280]) def", "16)), conv_defs=mobilenet_v2.V2_DEF, divisible_by=16, min_depth=32) s = [op.outputs[0].get_shape().as_list()[-1] for op in", "(10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) self.assertEqual(net.get_shape().as_list(), [10, 7, 7,", "slim = tf.contrib.slim def find_ops(optype): \"\"\"Find ops of a given", "(logits) self.assertEqual(num_convs, len(spec['spec']) * 2 - 2) # Check that", "dict(split_expansion=2), } _, _ = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224,", "16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16, use_explicit_padding=True) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self): sc", "def find_ops(optype): \"\"\"Find ops of a given type in graphdef", "224, 16)), conv_defs=mobilenet_v2.V2_DEF, divisible_by=16, min_depth=32) s = [op.outputs[0].get_shape().as_list()[-1] for op", "input_size, input_size, 3))) self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3], [output_size] * 2) def testWithSplits(self): spec", "# All but 3 op has 3 conv operatore, the", "are exposed. for i in range(2, 17): self.assertIn('layer_%d/depthwise_output' % i,", "* 2) def testWithSplits(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF) spec['overrides'] = {", "mobilenet_base returns pre-pooling layer. with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): net, _ =", "finegrain_classification_mode=True) s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s =", "224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def testWithOutputStride8AndExplicitPadding(self): tf.reset_default_graph()", "OR CONDITIONS OF ANY KIND, either express or implied. #", "sc[slim.arg_scope_func_key( slim.batch_norm)]) def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self): sc = mobilenet.training_scope(is_training=False) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc", "224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01, finegrain_classification_mode=True) s = [op.outputs[0].get_shape().as_list()[-1] for op", "first 2 and last one have two convolutions, and there", "3 - 5) def testWithOutputStride8(self): out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32,", "the License is distributed on an \"AS IS\" BASIS, #", "224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16, use_explicit_padding=True) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def", "* 2 - 2) # Check that depthwise are exposed.", "nets.mobilenet import mobilenet_v2 slim = tf.contrib.slim def find_ops(optype): \"\"\"Find ops", "len(find_ops('Conv2D')) # All but 3 op has 3 conv operatore,", "ops from nets.mobilenet import mobilenet from nets.mobilenet import mobilenet_v2 slim", "remainign 3 have one # and there is one unaccounted.", "192, 320, 384, 576, 960, 1280, 1001], s) def testDivisibleByWithArgScope(self):", "operation type (e.g. Conv2D) Returns: List of operations. \"\"\" gd", "pre-pooling layer. with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): net, _ = mobilenet_v2.mobilenet_base( tf.placeholder(tf.float32,", "(10, 224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01, finegrain_classification_mode=True) s = [op.outputs[0].get_shape().as_list()[-1]", "optype: operation type (e.g. Conv2D) Returns: List of operations. 
\"\"\"", "import division from __future__ import print_function import copy import tensorflow", "i, ep) def testCreationNoClasses(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF) net, ep =", "224, 224, 16)), conv_defs=spec, num_classes=None) self.assertIs(net, ep['global_pool']) def testImageSizes(self): for", "16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def testDivisibleBy(self): tf.reset_default_graph()", "testFineGrained(self): tf.reset_default_graph() # Verifies that depth_multiplier arg scope actually works", "law or agreed to in writing, software # distributed under", "copy import tensorflow as tf from nets.mobilenet import conv_blocks as", "= set(s) # All convolutions will be 8->48, except for", "from __future__ import print_function import copy import tensorflow as tf", "if no default min_depth is provided. with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): mobilenet_v2.mobilenet(", "conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128]) def testWithOutputStride16(self): tf.reset_default_graph()", "find_ops('Conv2D')] s = set(s) # All convolutions will be 8->48,", "def testDivisibleBy(self): tf.reset_default_graph() mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF,", "= tf.contrib.slim def find_ops(optype): \"\"\"Find ops of a given type", "% i, ep) def testCreationNoClasses(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF) net, ep", "from nets.mobilenet import mobilenet from nets.mobilenet import mobilenet_v2 slim =", "= len(find_ops('Conv2D')) # All but 3 op has 3 conv", "conv_defs=mobilenet_v2.V2_DEF, output_stride=16, use_explicit_padding=True) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self): sc =", "gd = tf.get_default_graph() return [var for var in gd.get_operations() if", "224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def", "may obtain a copy of the License at # #", "# Check that depthwise are exposed. for i in range(2,", "var in gd.get_operations() if var.type == optype] class MobilenetV2Test(tf.test.TestCase): def", "= copy.deepcopy(mobilenet_v2.V2_DEF) spec['overrides'] = { (ops.expanded_conv,): dict(split_expansion=2), } _, _", "set(s) self.assertSameElements(s, [32, 192, 128, 1001]) def testFineGrained(self): tf.reset_default_graph() #", "works # if no default min_depth is provided. mobilenet_v2.mobilenet( tf.placeholder(tf.float32,", "# Verifies that mobilenet_base returns pre-pooling layer. 
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):", "mobilenet.training_scope(is_training=True) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope() self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) if __name__", "[28, 28]) def testDivisibleBy(self): tf.reset_default_graph() mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224,", "= mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16) self.assertEqual(out.get_shape().as_list()[1:3],", "tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, use_explicit_padding=True, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3],", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "or a graph. Args: optype: operation type (e.g. Conv2D) Returns:", "= { (ops.expanded_conv,): dict(split_expansion=2), } _, _ = mobilenet.mobilenet( tf.placeholder(tf.float32,", "in find_ops('Conv2D')] s = set(s) self.assertSameElements([32, 64, 96, 160, 192,", "17): self.assertIn('layer_%d/depthwise_output' % i, ep) def testCreationNoClasses(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF)", "= tf.get_default_graph() return [var for var in gd.get_operations() if var.type", "Returns: List of operations. \"\"\" gd = tf.get_default_graph() return [var", "s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s = set(s)", "conv_defs=mobilenet_v2.V2_DEF, output_stride=16) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def testWithOutputStride8AndExplicitPadding(self): tf.reset_default_graph() out, _", "may not use this file except in compliance with the", "(10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def", "5) def testWithOutputStride8(self): out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "this file except in compliance with the License. # You", "import absolute_import from __future__ import division from __future__ import print_function", "as tf from nets.mobilenet import conv_blocks as ops from nets.mobilenet", "range(2, 17): self.assertIn('layer_%d/depthwise_output' % i, ep) def testCreationNoClasses(self): spec =", "op in find_ops('Conv2D')] s = set(s) self.assertSameElements(s, [32, 192, 128,", "1001]) def testFineGrained(self): tf.reset_default_graph() # Verifies that depth_multiplier arg scope", "sanity test. No deep reason for these particular # constants.", "import copy import tensorflow as tf from nets.mobilenet import conv_blocks", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "Check that depthwise are exposed. for i in range(2, 17):", "gd.get_operations() if var.type == optype] class MobilenetV2Test(tf.test.TestCase): def setUp(self): tf.reset_default_graph()", "# # Licensed under the Apache License, Version 2.0 (the", "(10, 224, 224, 16)), conv_defs=spec, num_classes=None) self.assertIs(net, ep['global_pool']) def testImageSizes(self):", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Verifies that mobilenet_base returns pre-pooling layer. with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): net,", "have two convolutions, and there is one # extra conv", "in the spec. 
(logits) self.assertEqual(num_convs, len(spec['spec']) * 2 - 2)", "in gd.get_operations() if var.type == optype] class MobilenetV2Test(tf.test.TestCase): def setUp(self):", "num_classes=None) self.assertIs(net, ep['global_pool']) def testImageSizes(self): for input_size, output_size in [(224,", "conv operatore, the remainign 3 have one # and there", "is one # extra conv that is not in the", "2) # Check that depthwise are exposed. for i in", "one. self.assertSameElements(s, [8, 48, 1001, 1280]) def testMobilenetBase(self): tf.reset_default_graph() #", "16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, use_explicit_padding=True, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def testWithOutputStride16AndExplicitPadding(self):", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "s = set(s) self.assertSameElements([32, 64, 96, 160, 192, 320, 384,", "depth_multiplier arg scope actually works # if no default min_depth", "_ = mobilenet_v2.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)", "mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) s =", "optype] class MobilenetV2Test(tf.test.TestCase): def setUp(self): tf.reset_default_graph() def testCreation(self): spec =", "# constants. # # All but first 2 and last", "slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): net, _ = mobilenet_v2.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224,", "= [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s = set(s) self.assertSameElements([32,", "self.assertEqual(num_convs, len(spec['spec']) * 2 - 2) # Check that depthwise", "128, 1001]) def testFineGrained(self): tf.reset_default_graph() # Verifies that depth_multiplier arg", "(10, 224, 224, 16)), conv_defs=spec) num_convs = len(find_ops('Conv2D')) # All", "op in find_ops('Conv2D')] s = set(s) # All convolutions will", "tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)),", "= mobilenet.training_scope(is_training=None) self.assertNotIn('is_training', sc[slim.arg_scope_func_key( slim.batch_norm)]) def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self): sc = mobilenet.training_scope(is_training=False)", "16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def testWithOutputStride8AndExplicitPadding(self): tf.reset_default_graph() out,", "sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope(is_training=True) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope() self.assertIn('is_training',", "with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): net, _ = mobilenet_v2.mobilenet_base( tf.placeholder(tf.float32, (10, 224,", "import conv_blocks as ops from nets.mobilenet import mobilenet from nets.mobilenet", "ep) def testCreationNoClasses(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF) net, ep = mobilenet.mobilenet(", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "import print_function import copy import tensorflow as tf from nets.mobilenet", "op has 3 conv operatore, the remainign 3 have one", "returns pre-pooling layer. 
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): net, _ = mobilenet_v2.mobilenet_base(", "tf.reset_default_graph() # Verifies that mobilenet_base returns pre-pooling layer. with slim.arg_scope((mobilenet.depth_multiplier,),", "is not in the spec. (logits) self.assertEqual(num_convs, len(spec['spec']) * 2", "len(find_ops('Conv2D')) # This is mostly a sanity test. No deep", "3)]: tf.reset_default_graph() _, ep = mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, input_size, input_size,", "or implied. # See the License for the specific language", "self.assertSameElements(s, [8, 48, 1001, 1280]) def testMobilenetBase(self): tf.reset_default_graph() # Verifies", "Rights Reserved. # # Licensed under the Apache License, Version", "self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3], [output_size] * 2) def testWithSplits(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF) spec['overrides']", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "depthwise are exposed. for i in range(2, 17): self.assertIn('layer_%d/depthwise_output' %", "224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) s = [op.outputs[0].get_shape().as_list()[-1] for op", "s) def testDivisibleByWithArgScope(self): tf.reset_default_graph() # Verifies that depth_multiplier arg scope", "2018 The TensorFlow Authors. All Rights Reserved. # # Licensed", "License. # ============================================================================== \"\"\"Tests for mobilenet_v2.\"\"\" from __future__ import absolute_import", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "from nets.mobilenet import conv_blocks as ops from nets.mobilenet import mobilenet", "layer. with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): net, _ = mobilenet_v2.mobilenet_base( tf.placeholder(tf.float32, (10,", "28]) def testWithOutputStride16AndExplicitPadding(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10,", "testCreationNoClasses(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF) net, ep = mobilenet.mobilenet( tf.placeholder(tf.float32, (10,", "# ============================================================================== \"\"\"Tests for mobilenet_v2.\"\"\" from __future__ import absolute_import from", "testWithOutputStride16(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224,", "sc = mobilenet.training_scope(is_training=None) self.assertNotIn('is_training', sc[slim.arg_scope_func_key( slim.batch_norm)]) def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self): sc =", "2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]", "tf.reset_default_graph() def testCreation(self): spec = dict(mobilenet_v2.V2_DEF) _, ep = mobilenet.mobilenet(", "_ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8,", "= len(find_ops('Conv2D')) # This is mostly a sanity test. 
No", "16)), conv_defs=spec, num_classes=None) self.assertIs(net, ep['global_pool']) def testImageSizes(self): for input_size, output_size", "(the \"License\"); # you may not use this file except", "conv_defs=mobilenet_v2.V2_DEF, divisible_by=16, min_depth=32) s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]", "384, 576, 960, 1280, 1001], s) def testDivisibleByWithArgScope(self): tf.reset_default_graph() #", "= mobilenet_v2.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) self.assertEqual(net.get_shape().as_list(),", "# you may not use this file except in compliance", "1001], s) def testDivisibleByWithArgScope(self): tf.reset_default_graph() # Verifies that depth_multiplier arg", "64, 96, 160, 192, 320, 384, 576, 960, 1280, 1001],", "find_ops(optype): \"\"\"Find ops of a given type in graphdef or", "convolutions, and there is one # extra conv that is", "= copy.deepcopy(mobilenet_v2.V2_DEF) net, ep = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224,", "= set(s) self.assertSameElements([32, 64, 96, 160, 192, 320, 384, 576,", "as ops from nets.mobilenet import mobilenet from nets.mobilenet import mobilenet_v2", "slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)", "and there is one # extra conv that is not", "3 op has 3 conv operatore, the remainign 3 have", "[8, 48, 1001, 1280]) def testMobilenetBase(self): tf.reset_default_graph() # Verifies that", "[14, 14]) def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self): sc = mobilenet.training_scope(is_training=None) self.assertNotIn('is_training', sc[slim.arg_scope_func_key( slim.batch_norm)])", "the License. # ============================================================================== \"\"\"Tests for mobilenet_v2.\"\"\" from __future__ import", "tf.placeholder(tf.float32, (10, 224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) s = [op.outputs[0].get_shape().as_list()[-1]", "576, 960, 1280, 1001], s) def testDivisibleByWithArgScope(self): tf.reset_default_graph() # Verifies", "tf from nets.mobilenet import conv_blocks as ops from nets.mobilenet import", "# # Unless required by applicable law or agreed to", "_, _ = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)", "def testCreation(self): spec = dict(mobilenet_v2.V2_DEF) _, ep = mobilenet.mobilenet( tf.placeholder(tf.float32,", "min_depth is provided. mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF,", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "for i in range(2, 17): self.assertIn('layer_%d/depthwise_output' % i, ep) def", "7, 128]) def testWithOutputStride16(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32,", "in graphdef or a graph. Args: optype: operation type (e.g.", "Version 2.0 (the \"License\"); # you may not use this", "except for the last one. 
self.assertSameElements(s, [8, 48, 1001, 1280])", "testCreation(self): spec = dict(mobilenet_v2.V2_DEF) _, ep = mobilenet.mobilenet( tf.placeholder(tf.float32, (10,", "[(224, 7), (192, 6), (160, 5), (128, 4), (96, 3)]:", "dict(mobilenet_v2.V2_DEF) _, ep = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)),", "mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3],", "op in find_ops('Conv2D')] s = set(s) self.assertSameElements([32, 64, 96, 160,", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved. #", "Copyright 2018 The TensorFlow Authors. All Rights Reserved. # #", "mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, divisible_by=16, min_depth=32) s", "def testWithOutputStride8AndExplicitPadding(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224,", "sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope() self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) if __name__ == '__main__':", "__future__ import absolute_import from __future__ import division from __future__ import", "implied. # See the License for the specific language governing", "conv_defs=mobilenet_v2.V2_DEF, output_stride=8, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def testDivisibleBy(self): tf.reset_default_graph() mobilenet_v2.mobilenet(", "# # All but first 2 and last one have", "under the Apache License, Version 2.0 (the \"License\"); # you", "2 and last one have two convolutions, and there is", "nets.mobilenet import conv_blocks as ops from nets.mobilenet import mobilenet from", "# limitations under the License. 
# ============================================================================== \"\"\"Tests for mobilenet_v2.\"\"\"", "in find_ops('Conv2D')] s = set(s) # All convolutions will be", "[14, 14]) def testWithOutputStride8AndExplicitPadding(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32,", "ep = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec, num_classes=None)", "self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self): sc = mobilenet.training_scope(is_training=None) self.assertNotIn('is_training', sc[slim.arg_scope_func_key(", "224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, use_explicit_padding=True, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])", "testImageSizes(self): for input_size, output_size in [(224, 7), (192, 6), (160,", "= mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, input_size, input_size, 3))) self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3], [output_size] *", "def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self): sc = mobilenet.training_scope(is_training=None) self.assertNotIn('is_training', sc[slim.arg_scope_func_key( slim.batch_norm)]) def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):", "class MobilenetV2Test(tf.test.TestCase): def setUp(self): tf.reset_default_graph() def testCreation(self): spec = dict(mobilenet_v2.V2_DEF)", "import mobilenet_v2 slim = tf.contrib.slim def find_ops(optype): \"\"\"Find ops of", "by applicable law or agreed to in writing, software #", "(10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16, use_explicit_padding=True) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])", "no default min_depth is provided. with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): mobilenet_v2.mobilenet( tf.placeholder(tf.float32,", "(e.g. Conv2D) Returns: List of operations. \"\"\" gd = tf.get_default_graph()", "tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec) num_convs = len(find_ops('Conv2D')) #", "All convolutions will be 8->48, except for the last one.", "[op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s = set(s) self.assertSameElements(s, [32,", "one # extra conv that is not in the spec.", "# All but first 2 and last one have two", "output_stride=8, use_explicit_padding=True, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def testWithOutputStride16AndExplicitPadding(self): tf.reset_default_graph() out,", "input_size, output_size in [(224, 7), (192, 6), (160, 5), (128,", "320, 384, 576, 960, 1280, 1001], s) def testDivisibleByWithArgScope(self): tf.reset_default_graph()", "out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF,", "def testWithSplits(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF) spec['overrides'] = { (ops.expanded_conv,): dict(split_expansion=2),", "16)), conv_defs=spec) num_convs = len(find_ops('Conv2D')) # All but 3 op", "will be 8->48, except for the last one. 
self.assertSameElements(s, [8,", "mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec, num_classes=None) self.assertIs(net, ep['global_pool'])", "return [var for var in gd.get_operations() if var.type == optype]", "(ops.expanded_conv,): dict(split_expansion=2), } _, _ = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224,", "960, 1280, 1001], s) def testDivisibleByWithArgScope(self): tf.reset_default_graph() # Verifies that", "conv that is not in the spec. (logits) self.assertEqual(num_convs, len(spec['spec'])", "self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def testDivisibleBy(self): tf.reset_default_graph() mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224,", "\"\"\"Find ops of a given type in graphdef or a", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) s = [op.outputs[0].get_shape().as_list()[-1] for op in", "conv_defs=spec) num_convs = len(find_ops('Conv2D')) # This is mostly a sanity", "of operations. \"\"\" gd = tf.get_default_graph() return [var for var", "Unless required by applicable law or agreed to in writing,", "in range(2, 17): self.assertIn('layer_%d/depthwise_output' % i, ep) def testCreationNoClasses(self): spec", "\"\"\" gd = tf.get_default_graph() return [var for var in gd.get_operations()", "min_depth=32): mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) s", "conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01, finegrain_classification_mode=True) s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]", "tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16, use_explicit_padding=True) self.assertEqual(out.get_shape().as_list()[1:3], [14,", "= mobilenet.training_scope(is_training=True) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope() self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) if", "mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16, use_explicit_padding=True) self.assertEqual(out.get_shape().as_list()[1:3],", "the remainign 3 have one # and there is one", "is provided. mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01,", "the specific language governing permissions and # limitations under the", "def testWithOutputStride16AndExplicitPadding(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224,", "actually works # if no default min_depth is provided. mobilenet_v2.mobilenet(", "Conv2D) Returns: List of operations. \"\"\" gd = tf.get_default_graph() return", "testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self): sc = mobilenet.training_scope(is_training=None) self.assertNotIn('is_training', sc[slim.arg_scope_func_key( slim.batch_norm)]) def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self): sc", "224, 16)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128]) def", "applicable law or agreed to in writing, software # distributed", "particular # constants. # # All but first 2 and", "has 3 conv operatore, the remainign 3 have one #", "8->48, except for the last one. 
self.assertSameElements(s, [8, 48, 1001,", "tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) self.assertEqual(net.get_shape().as_list(), [10, 7,", "len(spec['spec']) * 3 - 5) def testWithOutputStride8(self): out, _ =", "No deep reason for these particular # constants. # #", "mobilenet from nets.mobilenet import mobilenet_v2 slim = tf.contrib.slim def find_ops(optype):", "All but 3 op has 3 conv operatore, the remainign", "2 - 2) # Check that depthwise are exposed. for", "= dict(mobilenet_v2.V2_DEF) _, ep = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224,", "(96, 3)]: tf.reset_default_graph() _, ep = mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, input_size,", "testWithSplits(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF) spec['overrides'] = { (ops.expanded_conv,): dict(split_expansion=2), }", "in writing, software # distributed under the License is distributed", "__future__ import print_function import copy import tensorflow as tf from", "works # if no default min_depth is provided. with slim.arg_scope((mobilenet.depth_multiplier,),", "self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope(is_training=True) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope()", "net, _ = mobilenet_v2.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF,", "for these particular # constants. # # All but first", "[op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s = set(s) self.assertSameElements([32, 64,", "def testWithOutputStride16(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224,", "tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28,", "for var in gd.get_operations() if var.type == optype] class MobilenetV2Test(tf.test.TestCase):", "1001, 1280]) def testMobilenetBase(self): tf.reset_default_graph() # Verifies that mobilenet_base returns", "(160, 5), (128, 4), (96, 3)]: tf.reset_default_graph() _, ep =", "224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16, use_explicit_padding=True) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):", "mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, input_size, input_size, 3))) self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3], [output_size] * 2)", "deep reason for these particular # constants. # # All", "and there is one unaccounted. self.assertEqual(num_convs, len(spec['spec']) * 3 -", "mobilenet.training_scope(is_training=None) self.assertNotIn('is_training', sc[slim.arg_scope_func_key( slim.batch_norm)]) def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self): sc = mobilenet.training_scope(is_training=False) self.assertIn('is_training',", "[op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s = set(s) # All", "224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128])", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License, Version 2.0 (the \"License\"); # you may not use", "# This is mostly a sanity test. No deep reason", "provided. 
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 2)),", "# You may obtain a copy of the License at", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "if var.type == optype] class MobilenetV2Test(tf.test.TestCase): def setUp(self): tf.reset_default_graph() def", "_, ep = mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, input_size, input_size, 3))) self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3],", "self.assertSameElements([32, 64, 96, 160, 192, 320, 384, 576, 960, 1280,", "Authors. All Rights Reserved. # # Licensed under the Apache", "the last one. self.assertSameElements(s, [8, 48, 1001, 1280]) def testMobilenetBase(self):", "self.assertIs(net, ep['global_pool']) def testImageSizes(self): for input_size, output_size in [(224, 7),", "sc = mobilenet.training_scope() self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) if __name__ == '__main__': tf.test.main()", "input_size, 3))) self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3], [output_size] * 2) def testWithSplits(self): spec =", "96, 160, 192, 320, 384, 576, 960, 1280, 1001], s)", "tf.contrib.slim def find_ops(optype): \"\"\"Find ops of a given type in", "the License for the specific language governing permissions and #", "a given type in graphdef or a graph. Args: optype:", "224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, divisible_by=16, min_depth=32) s = [op.outputs[0].get_shape().as_list()[-1] for", "Apache License, Version 2.0 (the \"License\"); # you may not", "output_stride=16) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def testWithOutputStride8AndExplicitPadding(self): tf.reset_default_graph() out, _ =", "either express or implied. # See the License for the", "default min_depth is provided. mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 2)),", "4), (96, 3)]: tf.reset_default_graph() _, ep = mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10,", "num_convs = len(find_ops('Conv2D')) # All but 3 op has 3", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "def testWithOutputStride8(self): out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224,", "min_depth=32): net, _ = mobilenet_v2.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)),", "1280]) def testMobilenetBase(self): tf.reset_default_graph() # Verifies that mobilenet_base returns pre-pooling", "be 8->48, except for the last one. self.assertSameElements(s, [8, 48,", "_ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16)", "= mobilenet.training_scope(is_training=False) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope(is_training=True) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc", "is provided. with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224,", "conv_blocks as ops from nets.mobilenet import mobilenet from nets.mobilenet import", "self.assertSameElements(s, [32, 192, 128, 1001]) def testFineGrained(self): tf.reset_default_graph() # Verifies", "# if no default min_depth is provided. 
mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10,", "conv_defs=spec) num_convs = len(find_ops('Conv2D')) # All but 3 op has", "def testCreationNoClasses(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF) net, ep = mobilenet.mobilenet( tf.placeholder(tf.float32,", "under the License. # ============================================================================== \"\"\"Tests for mobilenet_v2.\"\"\" from __future__", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def testWithOutputStride8AndExplicitPadding(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base(", "= [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s = set(s) self.assertSameElements(s,", "mobilenet.training_scope(is_training=False) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope(is_training=True) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc =", "self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope() self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) if __name__ ==", "use_explicit_padding=True) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self): sc = mobilenet.training_scope(is_training=None) self.assertNotIn('is_training',", "division from __future__ import print_function import copy import tensorflow as", "[28, 28]) def testWithOutputStride16AndExplicitPadding(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32,", "= mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec, num_classes=None) self.assertIs(net,", "absolute_import from __future__ import division from __future__ import print_function import", "if no default min_depth is provided. mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224,", "(10, 224, 224, 16)), conv_defs=spec) num_convs = len(find_ops('Conv2D')) # This", "but 3 op has 3 conv operatore, the remainign 3", "\"License\"); # you may not use this file except in", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "192, 128, 1001]) def testFineGrained(self): tf.reset_default_graph() # Verifies that depth_multiplier", "def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self): sc = mobilenet.training_scope(is_training=False) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope(is_training=True)", "and last one have two convolutions, and there is one", "# distributed under the License is distributed on an \"AS", "setUp(self): tf.reset_default_graph() def testCreation(self): spec = dict(mobilenet_v2.V2_DEF) _, ep =", "# Unless required by applicable law or agreed to in", "mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec) num_convs = len(find_ops('Conv2D'))", "3 conv operatore, the remainign 3 have one # and", "that depthwise are exposed. for i in range(2, 17): self.assertIn('layer_%d/depthwise_output'", "ep = mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, input_size, input_size, 3))) self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3], [output_size]", "spec = copy.deepcopy(mobilenet_v2.V2_DEF) spec['overrides'] = { (ops.expanded_conv,): dict(split_expansion=2), } _,", "last one. 
self.assertSameElements(s, [8, 48, 1001, 1280]) def testMobilenetBase(self): tf.reset_default_graph()", "and # limitations under the License. # ============================================================================== \"\"\"Tests for", "self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def testWithOutputStride16AndExplicitPadding(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base(", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "find_ops('Conv2D')] s = set(s) self.assertSameElements([32, 64, 96, 160, 192, 320,", "14]) def testWithOutputStride8AndExplicitPadding(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10,", "min_depth=32) s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s =", "224, 224, 16)), conv_defs=spec) num_convs = len(find_ops('Conv2D')) # All but", "You may obtain a copy of the License at #", "reason for these particular # constants. # # All but", "224, 16)), conv_defs=spec) num_convs = len(find_ops('Conv2D')) # All but 3", "224, 16)), conv_defs=spec) num_convs = len(find_ops('Conv2D')) # This is mostly", "7, 7, 128]) def testWithOutputStride16(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base(", "i in range(2, 17): self.assertIn('layer_%d/depthwise_output' % i, ep) def testCreationNoClasses(self):", "s = set(s) self.assertSameElements(s, [32, 192, 128, 1001]) def testFineGrained(self):", "scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def testWithOutputStride16AndExplicitPadding(self): tf.reset_default_graph() out, _ =", "7), (192, 6), (160, 5), (128, 4), (96, 3)]: tf.reset_default_graph()", "the Apache License, Version 2.0 (the \"License\"); # you may", "5), (128, 4), (96, 3)]: tf.reset_default_graph() _, ep = mobilenet_v2.mobilenet(", "tf.reset_default_graph() _, ep = mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, input_size, input_size, 3)))" ]
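The tests above exercise depth_multiplier both directly and through an arg scope. As a rough illustration of the same API (not part of the test file; it assumes the TF1-era nets.mobilenet package used by these tests is on the path), one can build a half-width V2 graph and list the resulting Conv2D output depths, the same inspection trick the tests use via find_ops:

# Illustrative sketch only: inspect Conv2D output depths for a
# depth_multiplier=0.5 MobileNetV2 graph.
import tensorflow as tf
from nets.mobilenet import mobilenet_v2

tf.reset_default_graph()
logits, endpoints = mobilenet_v2.mobilenet(
    tf.placeholder(tf.float32, (1, 224, 224, 3)),
    depth_multiplier=0.5)
conv_depths = sorted(
    {op.outputs[0].get_shape().as_list()[-1]
     for op in tf.get_default_graph().get_operations()
     if op.type == 'Conv2D'})
print(conv_depths)  # channel counts scaled by the multiplier
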
[ "gist') gh = login(token=gh_token) gist = gh.gist(gh_gist) old_content = \"\"", "json import logging from boto.s3.connection import S3Connection from boto.s3.key import", "= login(token=gh_token) gist = gh.gist(gh_gist) old_content = \"\" for f", "firebase import os import datetime import json import logging from", "import firebase import os import datetime import json import logging", "data = f.get(firebase_path, None) new_content = json.dumps(data, ensure_ascii=False, indent=2, sort_keys=True)", "= connect_firebase() data = f.get(firebase_path, None) new_content = json.dumps(data, ensure_ascii=False,", "old_content = f.content break if old_content == new_content: logger.info('No changes", "import datetime import json import logging from boto.s3.connection import S3Connection", "\"\" for f in gist.iter_files(): if f.filename == gh_fname: old_content", "return f logger.info('==================================') logger.info('Fetching firebase data') f = connect_firebase() data", "changes detected') else: logger.info('Updating gist with new content') gist.edit(files={ gh_fname:", "from github3 import login firebase_url = os.environ['FIREBASE_DB'] firebase_secret = os.environ['FIREBASE_SECRET']", "logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) def connect_firebase(): f = firebase.FirebaseApplication(firebase_url, None)", "checked ATM gh_token = os.environ['GH_TOKEN'] gh_gist = os.environ['GH_GIST'] gh_fname =", "= f.content break if old_content == new_content: logger.info('No changes detected')", "old_content == new_content: logger.info('No changes detected') else: logger.info('Updating gist with", "data') f = connect_firebase() data = f.get(firebase_path, None) new_content =", "os.environ['GH_GIST'] gh_fname = os.environ['GH_FNAME'] logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) def connect_firebase():", "firebase data') f = connect_firebase() data = f.get(firebase_path, None) new_content", "os.environ['GH_TOKEN'] gh_gist = os.environ['GH_GIST'] gh_fname = os.environ['GH_FNAME'] logging.basicConfig(level=logging.INFO) logger =", "os.environ['FIREBASE_PATH'] firebase_username = os.environ['FIREBASE_USERNAME'] # not checked ATM gh_token =", "= logging.getLogger(__name__) def connect_firebase(): f = firebase.FirebaseApplication(firebase_url, None) f.authentication =", "= os.environ['GH_GIST'] gh_fname = os.environ['GH_FNAME'] logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) def", "= f.get(firebase_path, None) new_content = json.dumps(data, ensure_ascii=False, indent=2, sort_keys=True) logger.info('Reading", "gh_token = os.environ['GH_TOKEN'] gh_gist = os.environ['GH_GIST'] gh_fname = os.environ['GH_FNAME'] logging.basicConfig(level=logging.INFO)", "new_content: logger.info('No changes detected') else: logger.info('Updating gist with new content')", "os.environ['FIREBASE_USERNAME'] # not checked ATM gh_token = os.environ['GH_TOKEN'] gh_gist =", "old_content = \"\" for f in gist.iter_files(): if f.filename ==", "# not checked ATM gh_token = os.environ['GH_TOKEN'] gh_gist = os.environ['GH_GIST']", "S3Connection from boto.s3.key import Key from github3 import login firebase_url", "firebase.FirebaseAuthentication(firebase_secret, firebase_username, admin=True) return f logger.info('==================================') logger.info('Fetching firebase data') f", "gist.iter_files(): if f.filename == gh_fname: old_content = f.content break if", "from firebase import firebase import os import datetime import json", 
"= os.environ['FIREBASE_PATH'] firebase_username = os.environ['FIREBASE_USERNAME'] # not checked ATM gh_token", "import json import logging from boto.s3.connection import S3Connection from boto.s3.key", "= firebase.FirebaseApplication(firebase_url, None) f.authentication = firebase.FirebaseAuthentication(firebase_secret, firebase_username, admin=True) return f", "logger.info('Reading existing gist') gh = login(token=gh_token) gist = gh.gist(gh_gist) old_content", "datetime import json import logging from boto.s3.connection import S3Connection from", "firebase_secret = os.environ['FIREBASE_SECRET'] firebase_path = os.environ['FIREBASE_PATH'] firebase_username = os.environ['FIREBASE_USERNAME'] #", "f = connect_firebase() data = f.get(firebase_path, None) new_content = json.dumps(data,", "with new content') gist.edit(files={ gh_fname: { \"content\": new_content } })", "def connect_firebase(): f = firebase.FirebaseApplication(firebase_url, None) f.authentication = firebase.FirebaseAuthentication(firebase_secret, firebase_username,", "logger.info('No changes detected') else: logger.info('Updating gist with new content') gist.edit(files={", "firebase_username = os.environ['FIREBASE_USERNAME'] # not checked ATM gh_token = os.environ['GH_TOKEN']", "github3 import login firebase_url = os.environ['FIREBASE_DB'] firebase_secret = os.environ['FIREBASE_SECRET'] firebase_path", "break if old_content == new_content: logger.info('No changes detected') else: logger.info('Updating", "boto.s3.key import Key from github3 import login firebase_url = os.environ['FIREBASE_DB']", "from boto.s3.connection import S3Connection from boto.s3.key import Key from github3", "os.environ['FIREBASE_SECRET'] firebase_path = os.environ['FIREBASE_PATH'] firebase_username = os.environ['FIREBASE_USERNAME'] # not checked", "admin=True) return f logger.info('==================================') logger.info('Fetching firebase data') f = connect_firebase()", "gh = login(token=gh_token) gist = gh.gist(gh_gist) old_content = \"\" for", "import logging from boto.s3.connection import S3Connection from boto.s3.key import Key", "import login firebase_url = os.environ['FIREBASE_DB'] firebase_secret = os.environ['FIREBASE_SECRET'] firebase_path =", "f logger.info('==================================') logger.info('Fetching firebase data') f = connect_firebase() data =", "firebase import firebase import os import datetime import json import", "ATM gh_token = os.environ['GH_TOKEN'] gh_gist = os.environ['GH_GIST'] gh_fname = os.environ['GH_FNAME']", "boto.s3.connection import S3Connection from boto.s3.key import Key from github3 import", "connect_firebase(): f = firebase.FirebaseApplication(firebase_url, None) f.authentication = firebase.FirebaseAuthentication(firebase_secret, firebase_username, admin=True)", "f.get(firebase_path, None) new_content = json.dumps(data, ensure_ascii=False, indent=2, sort_keys=True) logger.info('Reading existing", "firebase_username, admin=True) return f logger.info('==================================') logger.info('Fetching firebase data') f =", "from boto.s3.key import Key from github3 import login firebase_url =", "os.environ['FIREBASE_DB'] firebase_secret = os.environ['FIREBASE_SECRET'] firebase_path = os.environ['FIREBASE_PATH'] firebase_username = os.environ['FIREBASE_USERNAME']", "connect_firebase() data = f.get(firebase_path, None) new_content = json.dumps(data, ensure_ascii=False, indent=2,", "= gh.gist(gh_gist) old_content = \"\" for f in gist.iter_files(): if", "Key from github3 import login firebase_url 
= os.environ['FIREBASE_DB'] firebase_secret =", "firebase_url = os.environ['FIREBASE_DB'] firebase_secret = os.environ['FIREBASE_SECRET'] firebase_path = os.environ['FIREBASE_PATH'] firebase_username", "None) f.authentication = firebase.FirebaseAuthentication(firebase_secret, firebase_username, admin=True) return f logger.info('==================================') logger.info('Fetching", "== new_content: logger.info('No changes detected') else: logger.info('Updating gist with new", "gh_fname = os.environ['GH_FNAME'] logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) def connect_firebase(): f", "logger = logging.getLogger(__name__) def connect_firebase(): f = firebase.FirebaseApplication(firebase_url, None) f.authentication", "logger.info('Fetching firebase data') f = connect_firebase() data = f.get(firebase_path, None)", "gh_gist = os.environ['GH_GIST'] gh_fname = os.environ['GH_FNAME'] logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__)", "= os.environ['GH_FNAME'] logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) def connect_firebase(): f =", "= json.dumps(data, ensure_ascii=False, indent=2, sort_keys=True) logger.info('Reading existing gist') gh =", "logger.info('Updating gist with new content') gist.edit(files={ gh_fname: { \"content\": new_content", "logging.getLogger(__name__) def connect_firebase(): f = firebase.FirebaseApplication(firebase_url, None) f.authentication = firebase.FirebaseAuthentication(firebase_secret,", "existing gist') gh = login(token=gh_token) gist = gh.gist(gh_gist) old_content =", "f in gist.iter_files(): if f.filename == gh_fname: old_content = f.content", "f.filename == gh_fname: old_content = f.content break if old_content ==", "indent=2, sort_keys=True) logger.info('Reading existing gist') gh = login(token=gh_token) gist =", "= os.environ['FIREBASE_DB'] firebase_secret = os.environ['FIREBASE_SECRET'] firebase_path = os.environ['FIREBASE_PATH'] firebase_username =", "f.authentication = firebase.FirebaseAuthentication(firebase_secret, firebase_username, admin=True) return f logger.info('==================================') logger.info('Fetching firebase", "sort_keys=True) logger.info('Reading existing gist') gh = login(token=gh_token) gist = gh.gist(gh_gist)", "firebase_path = os.environ['FIREBASE_PATH'] firebase_username = os.environ['FIREBASE_USERNAME'] # not checked ATM", "for f in gist.iter_files(): if f.filename == gh_fname: old_content =", "None) new_content = json.dumps(data, ensure_ascii=False, indent=2, sort_keys=True) logger.info('Reading existing gist')", "if old_content == new_content: logger.info('No changes detected') else: logger.info('Updating gist", "new content') gist.edit(files={ gh_fname: { \"content\": new_content } }) logger.info('Done.')", "ensure_ascii=False, indent=2, sort_keys=True) logger.info('Reading existing gist') gh = login(token=gh_token) gist", "gh.gist(gh_gist) old_content = \"\" for f in gist.iter_files(): if f.filename", "if f.filename == gh_fname: old_content = f.content break if old_content", "= \"\" for f in gist.iter_files(): if f.filename == gh_fname:", "gist = gh.gist(gh_gist) old_content = \"\" for f in gist.iter_files():", "login(token=gh_token) gist = gh.gist(gh_gist) old_content = \"\" for f in", "login firebase_url = os.environ['FIREBASE_DB'] firebase_secret = os.environ['FIREBASE_SECRET'] firebase_path = os.environ['FIREBASE_PATH']", "= os.environ['FIREBASE_USERNAME'] # not checked ATM gh_token = os.environ['GH_TOKEN'] gh_gist", "else: 
logger.info('Updating gist with new content') gist.edit(files={ gh_fname: { \"content\":", "logging from boto.s3.connection import S3Connection from boto.s3.key import Key from", "new_content = json.dumps(data, ensure_ascii=False, indent=2, sort_keys=True) logger.info('Reading existing gist') gh", "import os import datetime import json import logging from boto.s3.connection", "detected') else: logger.info('Updating gist with new content') gist.edit(files={ gh_fname: {", "os import datetime import json import logging from boto.s3.connection import", "f = firebase.FirebaseApplication(firebase_url, None) f.authentication = firebase.FirebaseAuthentication(firebase_secret, firebase_username, admin=True) return", "not checked ATM gh_token = os.environ['GH_TOKEN'] gh_gist = os.environ['GH_GIST'] gh_fname", "import S3Connection from boto.s3.key import Key from github3 import login", "logger.info('==================================') logger.info('Fetching firebase data') f = connect_firebase() data = f.get(firebase_path,", "gh_fname: old_content = f.content break if old_content == new_content: logger.info('No", "json.dumps(data, ensure_ascii=False, indent=2, sort_keys=True) logger.info('Reading existing gist') gh = login(token=gh_token)", "gist with new content') gist.edit(files={ gh_fname: { \"content\": new_content }", "in gist.iter_files(): if f.filename == gh_fname: old_content = f.content break", "firebase.FirebaseApplication(firebase_url, None) f.authentication = firebase.FirebaseAuthentication(firebase_secret, firebase_username, admin=True) return f logger.info('==================================')", "== gh_fname: old_content = f.content break if old_content == new_content:", "import Key from github3 import login firebase_url = os.environ['FIREBASE_DB'] firebase_secret", "f.content break if old_content == new_content: logger.info('No changes detected') else:", "= os.environ['FIREBASE_SECRET'] firebase_path = os.environ['FIREBASE_PATH'] firebase_username = os.environ['FIREBASE_USERNAME'] # not", "= firebase.FirebaseAuthentication(firebase_secret, firebase_username, admin=True) return f logger.info('==================================') logger.info('Fetching firebase data')", "= os.environ['GH_TOKEN'] gh_gist = os.environ['GH_GIST'] gh_fname = os.environ['GH_FNAME'] logging.basicConfig(level=logging.INFO) logger", "os.environ['GH_FNAME'] logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) def connect_firebase(): f = firebase.FirebaseApplication(firebase_url," ]
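The script decides whether to touch the gist by comparing serialized strings, which only works because json.dumps with sort_keys=True and a fixed indent is deterministic: an unchanged database always serializes to the byte-identical string, so no spurious edit is made. A tiny standalone check of that property (illustrative, not part of the script):

# With sort_keys=True and a fixed indent, key order in the source dict
# cannot change the output, so string equality is a reliable change test.
import json

a = json.dumps({"b": 1, "a": 2}, ensure_ascii=False, indent=2, sort_keys=True)
b = json.dumps({"a": 2, "b": 1}, ensure_ascii=False, indent=2, sort_keys=True)
assert a == b  # same data, same serialization: no spurious gist update
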
[ "hitP(f, R, t, r, g): if f>=g/2 : return 0.0", "i in range(numTestCases): f, R, t, r, g = list(map(float,", "g = list(map(float, input().split())) p = hitP(f, R, t, r,", "(nGrids * missGridSideLength)**2 remainMissArea = 0 if remain - 2*r", "return (totalArea - missArea) / (R-t)**2 def main(): numTestCases =", "= 0 if remain - 2*r > 2*f if remain", "R, t, r, g) print( \"Case #%d: %.6f\" %(i+1, p))", "missArea += (nGrids * missGridSideLength)**2 remainMissArea = 0 if remain", "// gridL missGridSideLength = g - 2*f print(\"gridL %.12f; nGrids", "numTestCases = int(input()) for i in range(numTestCases): f, R, t,", "missGridSideLength)**2 remainMissArea = 0 if remain - 2*r > 2*f", "%(missR**2, (R-t)**2) ) return (totalArea - missArea) / (R-t)**2 def", "hitP(f, R, t, r, g) print( \"Case #%d: %.6f\" %(i+1,", "= g+2*r nGrids = (R - t) // gridL missGridSideLength", "totalArea = R**2 / 4.0 print( \"missed a %.12f, total", "g+2*r nGrids = (R - t) // gridL missGridSideLength =", "if f>=g/2 : return 0.0 missArea = 0.0 gridL =", "list(map(float, input().split())) p = hitP(f, R, t, r, g) print(", "= (R - t) - indentSquareLength missArea += (nGrids *", "= R**2 / 4.0 print( \"missed a %.12f, total area", "t) - indentSquareLength missArea += (nGrids * missGridSideLength)**2 remainMissArea =", "R**2 / 4.0 print( \"missed a %.12f, total area %.12f\"", "%d\" %(gridL, nGrids) ) indentSquareLength = nGrids*gridL remain = (R", "R, t, r, g): if f>=g/2 : return 0.0 missArea", "if remain - 2*r > 2*f if remain > g+r:", "/ 4.0 print( \"missed a %.12f, total area %.12f\" %(missR**2,", "gridL = g+2*r nGrids = (R - t) // gridL", "> g+r: totalArea = R**2 / 4.0 print( \"missed a", "0.0 gridL = g+2*r nGrids = (R - t) //", "%(gridL, nGrids) ) indentSquareLength = nGrids*gridL remain = (R -", "def main(): numTestCases = int(input()) for i in range(numTestCases): f,", "return 0.0 missArea = 0.0 gridL = g+2*r nGrids =", "nGrids*gridL remain = (R - t) - indentSquareLength missArea +=", "t) // gridL missGridSideLength = g - 2*f print(\"gridL %.12f;", "= list(map(float, input().split())) p = hitP(f, R, t, r, g)", "input().split())) p = hitP(f, R, t, r, g) print( \"Case", "t, r, g) print( \"Case #%d: %.6f\" %(i+1, p)) if", "a %.12f, total area %.12f\" %(missR**2, (R-t)**2) ) return (totalArea", "g) print( \"Case #%d: %.6f\" %(i+1, p)) if __name__ ==", "remainMissArea = 0 if remain - 2*r > 2*f if", "area %.12f\" %(missR**2, (R-t)**2) ) return (totalArea - missArea) /", "* missGridSideLength)**2 remainMissArea = 0 if remain - 2*r >", "f, R, t, r, g = list(map(float, input().split())) p =", "def hitP(f, R, t, r, g): if f>=g/2 : return", "t, r, g): if f>=g/2 : return 0.0 missArea =", "= (R - t) // gridL missGridSideLength = g -", "(totalArea - missArea) / (R-t)**2 def main(): numTestCases = int(input())", "if remain > g+r: totalArea = R**2 / 4.0 print(", "4.0 print( \"missed a %.12f, total area %.12f\" %(missR**2, (R-t)**2)", "r, g = list(map(float, input().split())) p = hitP(f, R, t,", "True def hitP(f, R, t, r, g): if f>=g/2 :", "= int(input()) for i in range(numTestCases): f, R, t, r,", "missGridSideLength = g - 2*f print(\"gridL %.12f; nGrids %d\" %(gridL,", "(R-t)**2 def main(): numTestCases = int(input()) for i in range(numTestCases):", "f>=g/2 : return 0.0 missArea = 0.0 gridL = g+2*r", "- indentSquareLength missArea += (nGrids * missGridSideLength)**2 remainMissArea = 0", "2*r > 2*f if remain > g+r: totalArea = R**2", "g+r: totalArea = R**2 / 4.0 print( \"missed a %.12f,", "sys sys.dont_write_bytecode = True def hitP(f, R, 
t, r, g):", "= g - 2*f print(\"gridL %.12f; nGrids %d\" %(gridL, nGrids)", "0.0 missArea = 0.0 gridL = g+2*r nGrids = (R", "(R - t) // gridL missGridSideLength = g - 2*f", "R, t, r, g = list(map(float, input().split())) p = hitP(f,", "t, r, g = list(map(float, input().split())) p = hitP(f, R,", "\"Case #%d: %.6f\" %(i+1, p)) if __name__ == '__main__': main()", "range(numTestCases): f, R, t, r, g = list(map(float, input().split())) p", "0 if remain - 2*r > 2*f if remain >", "total area %.12f\" %(missR**2, (R-t)**2) ) return (totalArea - missArea)", "%.12f; nGrids %d\" %(gridL, nGrids) ) indentSquareLength = nGrids*gridL remain", "- t) - indentSquareLength missArea += (nGrids * missGridSideLength)**2 remainMissArea", "missArea = 0.0 gridL = g+2*r nGrids = (R -", "main(): numTestCases = int(input()) for i in range(numTestCases): f, R,", "- missArea) / (R-t)**2 def main(): numTestCases = int(input()) for", ") indentSquareLength = nGrids*gridL remain = (R - t) -", "nGrids) ) indentSquareLength = nGrids*gridL remain = (R - t)", "2*f if remain > g+r: totalArea = R**2 / 4.0", "2*f print(\"gridL %.12f; nGrids %d\" %(gridL, nGrids) ) indentSquareLength =", "g): if f>=g/2 : return 0.0 missArea = 0.0 gridL", "= hitP(f, R, t, r, g) print( \"Case #%d: %.6f\"", "for i in range(numTestCases): f, R, t, r, g =", "r, g): if f>=g/2 : return 0.0 missArea = 0.0", "nGrids = (R - t) // gridL missGridSideLength = g", "/ (R-t)**2 def main(): numTestCases = int(input()) for i in", "= True def hitP(f, R, t, r, g): if f>=g/2", "remain - 2*r > 2*f if remain > g+r: totalArea", "%.12f, total area %.12f\" %(missR**2, (R-t)**2) ) return (totalArea -", "indentSquareLength = nGrids*gridL remain = (R - t) - indentSquareLength", ") return (totalArea - missArea) / (R-t)**2 def main(): numTestCases", "(R-t)**2) ) return (totalArea - missArea) / (R-t)**2 def main():", "nGrids %d\" %(gridL, nGrids) ) indentSquareLength = nGrids*gridL remain =", "import sys sys.dont_write_bytecode = True def hitP(f, R, t, r,", "missArea) / (R-t)**2 def main(): numTestCases = int(input()) for i", "> 2*f if remain > g+r: totalArea = R**2 /", "print( \"missed a %.12f, total area %.12f\" %(missR**2, (R-t)**2) )", "p = hitP(f, R, t, r, g) print( \"Case #%d:", "indentSquareLength missArea += (nGrids * missGridSideLength)**2 remainMissArea = 0 if", "int(input()) for i in range(numTestCases): f, R, t, r, g", "(R - t) - indentSquareLength missArea += (nGrids * missGridSideLength)**2", "r, g) print( \"Case #%d: %.6f\" %(i+1, p)) if __name__", "remain = (R - t) - indentSquareLength missArea += (nGrids", "- 2*f print(\"gridL %.12f; nGrids %d\" %(gridL, nGrids) ) indentSquareLength", "sys.dont_write_bytecode = True def hitP(f, R, t, r, g): if", "- t) // gridL missGridSideLength = g - 2*f print(\"gridL", "= 0.0 gridL = g+2*r nGrids = (R - t)", "%.12f\" %(missR**2, (R-t)**2) ) return (totalArea - missArea) / (R-t)**2", "+= (nGrids * missGridSideLength)**2 remainMissArea = 0 if remain -", ": return 0.0 missArea = 0.0 gridL = g+2*r nGrids", "in range(numTestCases): f, R, t, r, g = list(map(float, input().split()))", "gridL missGridSideLength = g - 2*f print(\"gridL %.12f; nGrids %d\"", "- 2*r > 2*f if remain > g+r: totalArea =", "remain > g+r: totalArea = R**2 / 4.0 print( \"missed", "= nGrids*gridL remain = (R - t) - indentSquareLength missArea", "\"missed a %.12f, total area %.12f\" %(missR**2, (R-t)**2) ) return", "print( \"Case #%d: %.6f\" %(i+1, p)) if __name__ == '__main__':", "print(\"gridL %.12f; nGrids %d\" %(gridL, nGrids) ) indentSquareLength = 
nGrids*gridL", "g - 2*f print(\"gridL %.12f; nGrids %d\" %(gridL, nGrids) )" ]
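Under the draft's grid model, each cell of pitch g + 2r contains a "safe" square of side g - 2f, because the fly's centre must stay at least f away from every string. For a fly well inside the mesh, the per-cell miss probability is therefore ((g - 2f) / (g + 2r))**2. A small self-contained check of that ratio (illustrative only; per_cell_miss is invented for the example):

# Per-cell miss probability under the square-grid model.
def per_cell_miss(f, r, g):
    if f >= g / 2:
        return 0.0
    return ((g - 2 * f) / (g + 2 * r)) ** 2

print(per_cell_miss(0.25, 0.5, 1.0))  # 0.0625 == (0.5 / 2.0) ** 2
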
[ "end_token.copy_and_replace(keyname, new_key) return EventStreamResult(events, (from_token, end_token)) user_id_for_stream = user.to_string() if", "user_stream: _NotifierUserStream): self.user_to_user_stream[user_stream.user_id] = user_stream for room in user_stream.rooms: s", "2.0 (the \"License\"); # you may not use this file", "# Copyright 2014 - 2016 OpenMarket Ltd # # Licensed", "contents. So we invent an illegal user ID # (which", "return bool(self.events) @attr.s(slots=True, frozen=True) class _PendingRoomEventEntry: event_pos = attr.ib(type=PersistedEventPosition) extra_users", "expired_stream in expired_streams: expired_stream.remove(self) @log_function def _register_with_keys(self, user_stream: _NotifierUserStream): self.user_to_user_stream[user_stream.user_id]", "milliseconds. \"\"\" self.current_token = self.current_token.copy_and_advance(stream_key, stream_id) self.last_notified_token = self.current_token self.last_notified_ms", "a new event\"\"\" for cb in self.replication_callbacks: cb() def notify_remote_server_up(self,", "last token. if self.last_notified_token != token: return _NotificationListener(defer.succeed(self.current_token)) else: return", "defer.TimeoutError: log_kv({\"wait_for_events\": \"timeout\"}) break except defer.CancelledError: log_kv({\"wait_for_events\": \"cancelled\"}) break if", "\"result\": bool(result), } ) if result: break # Update the", "= ObservableDeferred(defer.Deferred()) noify_deferred.callback(self.current_token) def remove(self, notifier: \"Notifier\"): \"\"\"Remove this listener", "that comes before it. This gets updated every time we", "= set() # type: Set[str] for entry in pending: if", "= await self.state_handler.get_current_state( room_id, EventTypes.RoomHistoryVisibility, \"\" ) if state and", "time_now_ms: int, ): self.user_id = user_id self.rooms = set(rooms) self.current_token", "happened without waking up any of the normal user event", "entry in pending: if entry.event_pos.persisted_after(max_room_stream_token): self.pending_new_room_events.append(entry) else: if ( entry.type", "should be started and wrapped with run_as_background_process. \"\"\" self.replication_callbacks.append(cb) def", "state_key: Optional[str], membership: Optional[str], event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]]", "or if the timeout had # already expired. current_token =", "set(rooms) self.current_token = current_token # The last token for which", "try: stream_token = None if isinstance(new_token, int): stream_token = new_token", "Internally, the notifier keeps an event stream per user_id. #", "\"\"\"Notify the any replication listeners that there's a new event\"\"\"", "% ( explicit_room_id, user_id_for_stream, ) result = await self.wait_for_events( user_id_for_stream,", "that happened before this. \"\"\" # Immediately wake up stream", "self, user_id: str, timeout: int, callback: Callable[[StreamToken, StreamToken], Awaitable[T]], room_ids=None,", "events to happen before returning. If explicit_room_id is not set,", "under the License. 
import logging from collections import namedtuple from", "which is likely once per minute at # most when", "noify_deferred.callback(self.current_token) def remove(self, notifier: \"Notifier\"): \"\"\"Remove this listener from all", "import EventBase from synapse.handlers.presence import format_user_presence_state from synapse.logging.context import PreserveLoggingContext", "None: \"\"\"Used to inform replication listeners that something has happened", "bool: state = await self.state_handler.get_current_state( room_id, EventTypes.RoomHistoryVisibility, \"\" ) if", "replication self.replication_callbacks = [] # type: List[Callable[[], None]] # Called", "happened event wise. Will wake up all listeners for the", "remote server has come back up\"\"\" # We call federation_sender", "count_listeners) LaterGauge( \"synapse_notifier_rooms\", \"\", [], lambda: count(bool, list(self.room_to_user_streams.values())), ) LaterGauge(", "attr.ib(type=str) state_key = attr.ib(type=Optional[str]) membership = attr.ib(type=Optional[str]) class Notifier: \"\"\"This", "# has happened between the old prev_token and the current_token", "rooms. \"\"\" users = users or [] rooms = rooms", "# # I am sorry for what I have done.", "_PendingRoomEventEntry: event_pos = attr.ib(type=PersistedEventPosition) extra_users = attr.ib(type=Collection[UserID]) room_id = attr.ib(type=str)", "_notify_pusher_pool(self, max_room_stream_token: RoomStreamToken): try: self._pusher_pool.on_new_notifications(max_room_stream_token) except Exception: logger.exception(\"Error pusher pool", "License for the specific language governing permissions and # limitations", "getattr(after_token, keyname) if before_id == after_id: continue new_events, new_key =", "return sum(stream.count_listeners() for stream in all_user_streams) LaterGauge(\"synapse_notifier_listeners\", \"\", [], count_listeners)", "async def wait_for_events( self, user_id: str, timeout: int, callback: Callable[[StreamToken,", "any new events for them. If there are no new", "membership=event.content.get(\"membership\"), max_room_stream_token=max_room_stream_token, extra_users=extra_users or [], ) def on_new_room_event_args( self, room_id:", "token: StreamToken) -> _NotificationListener: \"\"\"Returns a deferred that is resolved", "Update the prev_token to the current_token since nothing # has", "new id for the stream the event came from. time_now_ms:", "import defer import synapse.server from synapse.api.constants import EventTypes, HistoryVisibility, Membership", "to the current_token since nothing # has happened between the", "if end_time <= now: break # Now we wait for", "client connection to the events stream. The events stream handler", "type: List[Callable[[], None]] # Called when remote servers have come", "in pending: if entry.event_pos.persisted_after(max_room_stream_token): self.pending_new_room_events.append(entry) else: if ( entry.type ==", "per minute at # most when scraping it. def count_listeners():", "users) for keying peeking # over /events. 
# # I", "self.storage = hs.get_storage() self.event_sources = hs.get_event_sources() self.store = hs.get_datastore() self.pending_new_room_events", "wake up any listeners that are listening to the room,", "self, user: UserID, explicit_room_id: Optional[str] ) -> Tuple[Collection[str], bool]: joined_room_ids", "event\") def on_new_event( self, stream_key: str, new_token: Union[int, RoomStreamToken], users:", "ObservableDeferred(defer.Deferred()) def notify( self, stream_key: str, stream_id: Union[int, RoomStreamToken], time_now_ms:", "number of streams listening for events. This listener will also", "cannot clash with any real users) for keying peeking #", "room_streams.add(new_user_stream) new_user_stream.rooms.add(room_id) def notify_replication(self) -> None: \"\"\"Notify the any replication", "new_token, time_now_ms) except Exception: logger.exception(\"Failed to notify listener\") self.notify_replication() #", "if before_id == after_id: continue new_events, new_key = await source.get_new_events(", "list(self.room_to_user_streams.values()): all_user_streams |= streams for stream in list(self.user_to_user_stream.values()): all_user_streams.add(stream) return", "self, room_id: str, event_type: str, state_key: Optional[str], membership: Optional[str], event_pos:", "user: UserID, pagination_config: PaginationConfig, timeout: int, is_guest: bool = False,", "only executed # when rendering the metrics page, which is", "self.clock.time_msec() expired_streams = [] expire_before_ts = time_now_ms - self.UNUSED_STREAM_EXPIRY_MS for", "in state.content: return ( state.content[\"history_visibility\"] == HistoryVisibility.WORLD_READABLE ) else: return", "previous event to be persisted. Args: max_room_stream_token: The highest stream_id", "for that user. At a given point a user may", "is None: current_token = self.event_sources.get_current_token() if room_ids is None: room_ids", "am sorry for what I have done. user_id_for_stream = \"_PEEKING_%s_%s\"", "_register_with_keys(self, user_stream: _NotifierUserStream): self.user_to_user_stream[user_stream.user_id] = user_stream for room in user_stream.rooms:", "explicit_room_id is set, that room will be polled for events", "UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000 def __init__(self, hs:", "def remove(self, notifier: \"Notifier\"): \"\"\"Remove this listener from all the", "any listeners for the users in the `extra_users` param. The", "if pagination_config.from_token: from_token = pagination_config.from_token else: from_token = self.event_sources.get_current_token() limit", "\"notify\": self.user_id, \"stream\": stream_key, \"stream_id\": stream_id, \"listeners\": self.count_listeners(), } )", "count(func: Callable[[T], bool], it: Iterable[T]) -> int: \"\"\"Return the number", "= {} # type: Dict[str, _NotifierUserStream] self.room_to_user_streams = {} #", "the users in the `extra_users` param. The events can be", "len(rooms), } ) for user in users: user_stream = self.user_to_user_stream.get(str(user))", "token from before we have no idea whether they should", "room. 
\"\"\" if pagination_config.from_token: from_token = pagination_config.from_token else: from_token =", "= self.clock.time_msec() + timeout while not result: try: now =", "self.clock.time_msec() new_events[:] = [ { \"type\": \"m.presence\", \"content\": format_user_presence_state(event, now),", "pusher pool of event\") def on_new_event( self, stream_key: str, new_token:", "def __init__( self, user_id: str, rooms: Collection[str], current_token: StreamToken, time_now_ms:", "`on_new_room_event_args`.\"\"\" self.on_new_room_event_args( event_pos=event_pos, room_id=event.room_id, event_type=event.type, state_key=event.get(\"state_key\"), membership=event.content.get(\"membership\"), max_room_stream_token=max_room_stream_token, extra_users=extra_users or", "\"\", [], lambda: len(self.user_to_user_stream) ) def add_replication_callback(self, cb: Callable[[], None]):", "OF ANY KIND, either express or implied. # See the", "See the License for the specific language governing permissions and", "Collection[str], current_token: StreamToken, time_now_ms: int, ): self.user_id = user_id self.rooms", "to in writing, software # distributed under the License is", "keying peeking # over /events. # # I am sorry", "type=event_type, state_key=state_key, membership=membership, ) ) self._notify_pending_new_room_events(max_room_stream_token) self.notify_replication() def _notify_pending_new_room_events(self, max_room_stream_token:", "self.rooms = set(rooms) self.current_token = current_token # The last token", "streams. \"\"\" self.pending_new_room_events.append( _PendingRoomEventEntry( event_pos=event_pos, extra_users=extra_users or [], room_id=room_id, type=event_type,", "have a token from before we have no idea whether", "We want /events to be used for peeking independently of", "up to `timeout` milliseconds for any new events to happen", "== \"presence\": now = self.clock.time_msec() new_events[:] = [ { \"type\":", "or agreed to in writing, software # distributed under the", "up. self.last_notified_token = current_token self.last_notified_ms = time_now_ms with PreserveLoggingContext(): self.notify_deferred", "current_token=current_token, time_now_ms=self.clock.time_msec(), ) self._register_with_keys(user_stream) result = None prev_token = from_token", "stream per user_id. # This is used by both /sync", "= attr.ib(type=PersistedEventPosition) extra_users = attr.ib(type=Collection[UserID]) room_id = attr.ib(type=str) type =", "_NotifierUserStream( user_id=user_id, rooms=room_ids, current_token=current_token, time_now_ms=self.clock.time_msec(), ) self._register_with_keys(user_stream) result = None", "= user_id self.rooms = set(rooms) self.current_token = current_token # The", "\"\"\"Wait until the callback returns a non empty response or", "defer import synapse.server from synapse.api.constants import EventTypes, HistoryVisibility, Membership from", "+= 1 return n class _NotificationListener: \"\"\"This represents a single", "import attr from prometheus_client import Counter from twisted.internet import defer", "will be polled for events only if it is world", "user=user, from_key=getattr(from_token, keyname), limit=limit, is_guest=is_peeking, room_ids=room_ids, explicit_room_id=explicit_room_id, ) if name", "compliance with the License. 
# You may obtain a copy", "was no timeout or if the timeout had # already", "prometheus_client import Counter from twisted.internet import defer import synapse.server from", "wait_for_events( self, user_id: str, timeout: int, callback: Callable[[StreamToken, StreamToken], Awaitable[T]],", "/ 1000.0, self.hs.get_reactor(), ) with start_active_span(\"wait_for_events.deferred\"): log_kv( { \"wait_for_events\": \"sleep\",", "max_room_stream_token: RoomStreamToken): \"\"\"Poke services that might care that the room", "should *not* return a Deferred - if it needs to", "synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client logger = logging.getLogger(__name__)", "= self.clock.time_msec() if end_time <= now: break # Now we", "At a given point a user may have a number", "timeout: int, is_guest: bool = False, explicit_room_id: Optional[str] = None,", "{} # type: Dict[str, _NotifierUserStream] self.room_to_user_streams = {} # type:", "not use this file except in compliance with the License.", "remove(self, notifier: \"Notifier\"): \"\"\"Remove this listener from all the indexes", "import logging from collections import namedtuple from typing import (", "indexes in the Notifier class. \"\"\" def __init__( self, user_id:", "Optional[Collection[Union[str, UserID]]] = None, ): try: stream_token = None if", ") self._register_with_keys(user_stream) result = None prev_token = from_token if timeout:", "listener.deferred, (end_time - now) / 1000.0, self.hs.get_reactor(), ) with start_active_span(\"wait_for_events.deferred\"):", "state_key = attr.ib(type=Optional[str]) membership = attr.ib(type=Optional[str]) class Notifier: \"\"\"This class", "you may not use this file except in compliance with", "wait for the _NotifierUserStream to be told there # is", "after_id = getattr(after_token, keyname) if before_id == after_id: continue new_events,", "it: Iterable[T]) -> int: \"\"\"Return the number of items in", "None, ): \"\"\"Used to inform listeners that something has happened", "current_token) return result async def get_events_for( self, user: UserID, pagination_config:", "except defer.TimeoutError: log_kv({\"wait_for_events\": \"timeout\"}) break except defer.CancelledError: log_kv({\"wait_for_events\": \"cancelled\"}) break", "EventStreamResult(namedtuple(\"EventStreamResult\", (\"events\", \"tokens\"))): def __bool__(self): return bool(self.events) @attr.s(slots=True, frozen=True) class", "be peristed out of order. The notifier will wait until", "Notifier class. \"\"\" def __init__( self, user_id: str, rooms: Collection[str],", "= hs.get_pusherpool() self.federation_sender = None if hs.should_send_federation(): self.federation_sender = hs.get_federation_sender()", "set() log_kv( { \"waking_up_explicit_users\": len(users), \"waking_up_explicit_rooms\": len(rooms), } ) for", "Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union, ) import", "events for them. 
If there are no new events wait", "with any real users) for keying peeking # over /events.", "self.pending_new_room_events self.pending_new_room_events = [] users = set() # type: Set[UserID]", "this listener from all the indexes in the Notifier it", "[\"deferred\"] def __init__(self, deferred): self.deferred = deferred class _NotifierUserStream: \"\"\"This", "reference to it and b) it introduces # circular dependencies.", "n += 1 return n class _NotificationListener: \"\"\"This represents a", "OpenMarket Ltd # # Licensed under the Apache License, Version", "to stream over replication self.replication_callbacks = [] # type: List[Callable[[],", "event\") def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken): try: self._pusher_pool.on_new_notifications(max_room_stream_token) except Exception: logger.exception(\"Error", "than registering as a # callback as a) we already", "of a new event from an event source. Args: stream_key:", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "= [] # type: List[Callable[[str], None]] self.clock = hs.get_clock() self.appservice_handler", "executed # when rendering the metrics page, which is likely", "\"\", [], lambda: count(bool, list(self.room_to_user_streams.values())), ) LaterGauge( \"synapse_notifier_users\", \"\", [],", "def on_new_event( self, stream_key: str, new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str,", "self._is_world_readable(explicit_room_id): return [explicit_room_id], False raise AuthError(403, \"Non-joined access not allowed\")", "# woken up or not, so lets just wake them", "\"\"\"Poke services that might care that the room position has", "int): stream_token = new_token self.appservice_handler.notify_interested_services_ephemeral( stream_key, stream_token, users or []", "= None, ) -> EventStreamResult: \"\"\"For the given user and", "async def _is_world_readable(self, room_id: str) -> bool: state = await", "current time in milliseconds. \"\"\" self.current_token = self.current_token.copy_and_advance(stream_key, stream_id) self.last_notified_token", ") T = TypeVar(\"T\") # TODO(paul): Should be shared somewhere", "time_now_ms = self.clock.time_msec() for user_stream in user_streams: try: user_stream.notify(stream_key, new_token,", "self.hs = hs self.storage = hs.get_storage() self.event_sources = hs.get_event_sources() self.store", "\"synapse_notifier_users_woken_by_stream\", \"\", [\"stream\"] ) T = TypeVar(\"T\") # TODO(paul): Should", "stream. It tracks the most recent stream token for that", "[] # type: List[Callable[[], None]] # Called when remote servers", "stream_key: The stream the event came from. stream_id: The new", "= hs.get_event_sources() self.store = hs.get_datastore() self.pending_new_room_events = [] # type:", "= TypeVar(\"T\") # TODO(paul): Should be shared somewhere def count(func:", "whether they should be # woken up or not, so", "the metrics page, which is likely once per minute at", "= \"%s_key\" % name before_id = getattr(before_token, keyname) after_id =", "keep track of which rooms it is listening in so", "there is a new token greater than the given token.", "has joined the room. \"\"\" if pagination_config.from_token: from_token = pagination_config.from_token", "user_stream = self.user_to_user_stream.get(str(user)) if user_stream is not None: user_streams.add(user_stream) for", "return len(self.notify_deferred.observers()) def new_listener(self, token: StreamToken) -> _NotificationListener: \"\"\"Returns a", "a number of streams listening for events. 
This listener will", "it at the current token since if we get any", "track of which rooms it is listening in so that", "Counter(\"synapse_notifier_notified_events\", \"\") users_woken_by_stream_counter = Counter( \"synapse_notifier_users_woken_by_stream\", \"\", [\"stream\"] ) T", "try: self._pusher_pool.on_new_notifications(max_room_stream_token) except Exception: logger.exception(\"Error pusher pool of event\") def", "= await source.get_new_events( user=user, from_key=getattr(from_token, keyname), limit=limit, is_guest=is_peeking, room_ids=room_ids, explicit_room_id=explicit_room_id,", "self.notify_replication() def _notify_pending_new_room_events(self, max_room_stream_token: RoomStreamToken): \"\"\"Notify for the room events", "List[Callable[[str], None]] self.clock = hs.get_clock() self.appservice_handler = hs.get_application_service_handler() self._pusher_pool =", "Exception: logger.exception(\"Error notifying application services of event\") def _notify_app_services_ephemeral( self,", "bool]: joined_room_ids = await self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if explicit_room_id in", "governing permissions and # limitations under the License. import logging", "be polled for events. If explicit_room_id is set, that room", "per user_id. # This is used by both /sync and", "arguments. It should *not* return a Deferred - if it", "the `extra_users` param. The events can be peristed out of", "before_token: return EventStreamResult([], (from_token, from_token)) events = [] # type:", "listener\") self.notify_replication() # Notify appservices self._notify_app_services_ephemeral( stream_key, new_token, users, )", "the deferred. \"\"\" __slots__ = [\"deferred\"] def __init__(self, deferred): self.deferred", "for x in it: if func(x): n += 1 return", "and any listeners for the users in the `extra_users` param.", "[] with Measure(self.clock, \"on_new_event\"): user_streams = set() log_kv( { \"waking_up_explicit_users\":", "\"\"\" self.current_token = self.current_token.copy_and_advance(stream_key, stream_id) self.last_notified_token = self.current_token self.last_notified_ms =", "self.user_to_user_stream.get(str(user)) if user_stream is not None: user_streams.add(user_stream) for room in", "end_time = self.clock.time_msec() + timeout while not result: try: now", "self.notify_deferred = ObservableDeferred(defer.Deferred()) noify_deferred.callback(self.current_token) def remove(self, notifier: \"Notifier\"): \"\"\"Remove this", "<= now: break # Now we wait for the _NotifierUserStream", "/events. # # I am sorry for what I have", "that might care that the room position has been updated.", "polled for events only if it is world readable or", "self.event_sources = hs.get_event_sources() self.store = hs.get_datastore() self.pending_new_room_events = [] #", "if room_ids is None: room_ids = await self.store.get_rooms_for_user(user_id) user_stream =", "from twisted.internet import defer import synapse.server from synapse.api.constants import EventTypes,", "persisted before notifying the client streams. 
\"\"\" self.pending_new_room_events.append( _PendingRoomEventEntry( event_pos=event_pos,", "for notifying any listeners when there are new events available", "( entry.type == EventTypes.Member and entry.membership == Membership.JOIN and entry.state_key", "after_id: continue new_events, new_key = await source.get_new_events( user=user, from_key=getattr(from_token, keyname),", "not set, the user's joined rooms will be polled for", "deferred, so to notify the handler it is sufficient to", "pool of event\") def on_new_event( self, stream_key: str, new_token: Union[int,", "self.room_to_user_streams.setdefault(room_id, set()) room_streams.add(new_user_stream) new_user_stream.rooms.add(room_id) def notify_replication(self) -> None: \"\"\"Notify the", "= await filter_events_for_client( self.storage, user.to_string(), new_events, is_peeking=is_peeking, ) elif name", "class _NotificationListener: \"\"\"This represents a single client connection to the", "to be used for peeking independently of /sync, # without", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "explicit_room_id: Optional[str] ) -> Tuple[Collection[str], bool]: joined_room_ids = await self.store.get_rooms_for_user(user.to_string())", "LaterGauge( \"synapse_notifier_rooms\", \"\", [], lambda: count(bool, list(self.room_to_user_streams.values())), ) LaterGauge( \"synapse_notifier_users\",", "expired. current_token = user_stream.current_token result = await callback(prev_token, current_token) return", "} for event in new_events ] events.extend(new_events) end_token = end_token.copy_and_replace(keyname,", "any new events to happen before returning. If explicit_room_id is", "having been # down. self.remote_server_up_callbacks = [] # type: List[Callable[[str],", "any listeners for this user of a new event from", "in user_stream.rooms: s = self.room_to_user_streams.setdefault(room, set()) s.add(user_stream) def _user_joined_room(self, user_id:", "as a) we already have a reference to it and", "rooms, return any new events for them. If there are", "events available for it. Primarily used from the /events stream.", "when scraping it. def count_listeners(): all_user_streams = set() # type:", "new things to stream over replication self.replication_callbacks = [] #", "the event came from. time_now_ms: The current time in milliseconds.", "rooms: user_streams |= self.room_to_user_streams.get(room, set()) time_now_ms = self.clock.time_msec() for user_stream", "self.store.get_rooms_for_user(user_id) user_stream = _NotifierUserStream( user_id=user_id, rooms=room_ids, current_token=current_token, time_now_ms=self.clock.time_msec(), ) self._register_with_keys(user_stream)", "is_peeking=is_peeking, ) elif name == \"presence\": now = self.clock.time_msec() new_events[:]", "__slots__ = [\"deferred\"] def __init__(self, deferred): self.deferred = deferred class", "events. This listener will also keep track of which rooms", "of /sync, # without polluting its contents. So we invent", "happened before this. \"\"\" # Immediately wake up stream if", "Union, ) import attr from prometheus_client import Counter from twisted.internet", "not None: user_streams.add(user_stream) for room in rooms: user_streams |= self.room_to_user_streams.get(room,", "This is used by both /sync and /events. # We", "self.pending_new_room_events = [] users = set() # type: Set[UserID] rooms", "file except in compliance with the License. 
# You may", "self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if explicit_room_id in joined_room_ids: return [explicit_room_id], True", "attr.ib(type=Optional[str]) class Notifier: \"\"\"This class is responsible for notifying any", "to the room, and any listeners for the users in", "self._notify_app_services(max_room_stream_token) self._notify_pusher_pool(max_room_stream_token) if self.federation_sender: self.federation_sender.notify_new_events(max_room_stream_token) def _notify_app_services(self, max_room_stream_token: RoomStreamToken): try:", "should be # woken up or not, so lets just", "from synapse.metrics import LaterGauge from synapse.streams.config import PaginationConfig from synapse.types", "List[Callable[[], None]] # Called when remote servers have come back", "< expire_before_ts: expired_streams.append(stream) for expired_stream in expired_streams: expired_stream.remove(self) @log_function def", "and entry.membership == Membership.JOIN and entry.state_key ): self._user_joined_room(entry.state_key, entry.room_id) users.update(entry.extra_users)", "= [] # type: List[Callable[[], None]] # Called when remote", "stream_key, new_token, users, ) def on_new_replication_data(self) -> None: \"\"\"Used to", "event_type: str, state_key: Optional[str], membership: Optional[str], event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken,", "user ID # (which thus cannot clash with any real", "of the normal user event streams\"\"\" self.notify_replication() async def wait_for_events(", "come back online after having been # down. self.remote_server_up_callbacks =", "for entry in pending: if entry.event_pos.persisted_after(max_room_stream_token): self.pending_new_room_events.append(entry) else: if (", "type: List[EventBase] end_token = from_token for name, source in self.event_sources.sources.items():", "Dict[str, _NotifierUserStream] self.room_to_user_streams = {} # type: Dict[str, Set[_NotifierUserStream]] self.hs", "new_user_stream.rooms.add(room_id) def notify_replication(self) -> None: \"\"\"Notify the any replication listeners", "_NotifierUserStream] self.room_to_user_streams = {} # type: Dict[str, Set[_NotifierUserStream]] self.hs =", "it knows about. \"\"\" for room in self.rooms: lst =", "= _NotifierUserStream( user_id=user_id, rooms=room_ids, current_token=current_token, time_now_ms=self.clock.time_msec(), ) self._register_with_keys(user_stream) result =", "extra_users=extra_users or [], ) def on_new_room_event_args( self, room_id: str, event_type:", "= [] # type: List[_PendingRoomEventEntry] # Called when there are", "It tracks the most recent stream token for that user.", "rooms = rooms or [] with Measure(self.clock, \"on_new_event\"): user_streams =", "= user_stream.new_listener(prev_token) listener.deferred = timeout_deferred( listener.deferred, (end_time - now) /", "poked. # We start it at the current token since", "await self.wait_for_events( user_id_for_stream, timeout, check_for_updates, room_ids=room_ids, from_token=from_token, ) return result", "to be told there # is a new token. listener", "@log_function def remove_expired_streams(self) -> None: time_now_ms = self.clock.time_msec() expired_streams =", "set()) room_streams.add(new_user_stream) new_user_stream.rooms.add(room_id) def notify_replication(self) -> None: \"\"\"Notify the any", "language governing permissions and # limitations under the License. 
import", "= None if isinstance(new_token, int): stream_token = new_token self.appservice_handler.notify_interested_services_ephemeral( stream_key,", "current token since if we get any streams # that", "all listeners for the given users and rooms. \"\"\" users", "listening for events. This listener will also keep track of", "are listening to the room, and any listeners for the", "RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, ): try: stream_token =", "to the events stream. The events stream handler will have", "filter_events_for_client( self.storage, user.to_string(), new_events, is_peeking=is_peeking, ) elif name == \"presence\":", "order. The notifier will wait until all previous events have", "are new events available for it. Primarily used from the", "_NotificationListener: \"\"\"Returns a deferred that is resolved when there is", "Immediately wake up stream if something has already since happened", "KIND, either express or implied. # See the License for", "represents a user connected to the event stream. It tracks", "synapse.metrics import LaterGauge from synapse.streams.config import PaginationConfig from synapse.types import", "from synapse.handlers.presence import format_user_presence_state from synapse.logging.context import PreserveLoggingContext from synapse.logging.opentracing", "an event source. Args: stream_key: The stream the event came", "down. self.remote_server_up_callbacks = [] # type: List[Callable[[str], None]] self.clock =", "The new id for the stream the event came from.", "timeout while not result: try: now = self.clock.time_msec() if end_time", "\"\"\"Remove this listener from all the indexes in the Notifier", "is None: room_ids = await self.store.get_rooms_for_user(user_id) user_stream = _NotifierUserStream( user_id=user_id,", "stream over replication self.replication_callbacks = [] # type: List[Callable[[], None]]", "False, explicit_room_id: Optional[str] = None, ) -> EventStreamResult: \"\"\"For the", "the handler it is sufficient to resolve the deferred. \"\"\"", "None if hs.should_send_federation(): self.federation_sender = hs.get_federation_sender() self.state_handler = hs.get_state_handler() self.clock.looping_call(", ") def on_new_room_event_args( self, room_id: str, event_type: str, state_key: Optional[str],", "(the \"License\"); # you may not use this file except", "from_token)) events = [] # type: List[EventBase] end_token = from_token", "that is resolved when there is a new token greater", "event and calls `on_new_room_event_args`.\"\"\" self.on_new_room_event_args( event_pos=event_pos, room_id=event.room_id, event_type=event.type, state_key=event.get(\"state_key\"), membership=event.content.get(\"membership\"),", "# Notify appservices self._notify_app_services_ephemeral( stream_key, new_token, users, ) def on_new_replication_data(self)", "callback returns a non empty response or the timeout fires.", "from_token if timeout: end_time = self.clock.time_msec() + timeout while not", "name == \"room\": new_events = await filter_events_for_client( self.storage, user.to_string(), new_events,", "thus cannot clash with any real users) for keying peeking", "given point a user may have a number of streams", "\"timeout\"}) break except defer.CancelledError: log_kv({\"wait_for_events\": \"cancelled\"}) break if result is", "the given token. Args: token: The token from which we", "the indexes in the Notifier class. 
\"\"\" def __init__( self,", "set()) lst.discard(self) notifier.user_to_user_stream.pop(self.user_id) def count_listeners(self) -> int: return len(self.notify_deferred.observers()) def", "logging from collections import namedtuple from typing import ( Awaitable,", "self.clock.time_msec() for user_stream in user_streams: try: user_stream.notify(stream_key, new_token, time_now_ms) except", "# down. self.remote_server_up_callbacks = [] # type: List[Callable[[str], None]] self.clock", "self, event: EventBase, event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] =", "None, ) -> EventStreamResult: \"\"\"For the given user and rooms,", "= current_token # The last token for which we should", "once per minute at # most when scraping it. def", "notify_replication(self) -> None: \"\"\"Notify the any replication listeners that there's", "# # Unless required by applicable law or agreed to", "def count(func: Callable[[T], bool], it: Iterable[T]) -> int: \"\"\"Return the", "token greater than the given token. Args: token: The token", "len(users), \"waking_up_explicit_rooms\": len(rooms), } ) for user in users: user_stream", "queued waiting for a previous event to be persisted. Args:", "\"synapse_notifier_users\", \"\", [], lambda: len(self.user_to_user_stream) ) def add_replication_callback(self, cb: Callable[[],", "= from_token if timeout: end_time = self.clock.time_msec() + timeout while", "current_token self.last_notified_ms = time_now_ms with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) def", "# type: Set[UserID] rooms = set() # type: Set[str] for", "to be persisted. Args: max_room_stream_token: The highest stream_id below which", "RoomStreamToken): try: self._pusher_pool.on_new_notifications(max_room_stream_token) except Exception: logger.exception(\"Error pusher pool of event\")", "= user_stream.current_token result = await callback(prev_token, current_token) log_kv( { \"wait_for_events\":", "which func returns true.\"\"\" n = 0 for x in", "UserID, ) from synapse.util.async_helpers import ObservableDeferred, timeout_deferred from synapse.util.metrics import", "from before we have no idea whether they should be", "wait until all previous events have been persisted before notifying", "implied. # See the License for the specific language governing", "it for which func returns true.\"\"\" n = 0 for", "- if it needs to do any asynchronous work, a", "now = self.clock.time_msec() new_events[:] = [ { \"type\": \"m.presence\", \"content\":", "i.e. we shouldn't notify for things that happened before this.", "room in rooms: user_streams |= self.room_to_user_streams.get(room, set()) time_now_ms = self.clock.time_msec()", "user_id_for_stream = user.to_string() if is_peeking: # Internally, the notifier keeps", "when rendering the metrics page, which is likely once per", "hs self.storage = hs.get_storage() self.event_sources = hs.get_event_sources() self.store = hs.get_datastore()", "if it is world readable or the user has joined", "-> None: time_now_ms = self.clock.time_msec() expired_streams = [] expire_before_ts =", "yielded to the deferred, so to notify the handler it", "be told there # is a new token. listener =", "it needs to do any asynchronous work, a background thread", "about. 
\"\"\" for room in self.rooms: lst = notifier.room_to_user_streams.get(room, set())", "new_events ] events.extend(new_events) end_token = end_token.copy_and_replace(keyname, new_key) return EventStreamResult(events, (from_token,", "token. listener = user_stream.new_listener(prev_token) listener.deferred = timeout_deferred( listener.deferred, (end_time -", "Optional[str], membership: Optional[str], event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] =", "that something has happened event wise. Will wake up all", "connection to the events stream. The events stream handler will", "import namedtuple from typing import ( Awaitable, Callable, Dict, Iterable,", "interested application service. self._notify_app_services(max_room_stream_token) self._notify_pusher_pool(max_room_stream_token) if self.federation_sender: self.federation_sender.notify_new_events(max_room_stream_token) def _notify_app_services(self,", "updated every time we get poked. # We start it", "self.deferred = deferred class _NotifierUserStream: \"\"\"This represents a user connected", "\"\"\"Notify any replication that a remote server has come back", "user_stream.current_token, } ) current_token = user_stream.current_token result = await callback(prev_token,", "stream_key: str, new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] = None,", "if it needs to do any asynchronous work, a background", "( state.content[\"history_visibility\"] == HistoryVisibility.WORLD_READABLE ) else: return False @log_function def", "= hs.get_state_handler() self.clock.looping_call( self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS ) # This is not", "of event\") def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken): try: self._pusher_pool.on_new_notifications(max_room_stream_token) except Exception:", "room_ids=room_ids, from_token=from_token, ) return result async def _get_room_ids( self, user:", "real users) for keying peeking # over /events. # #", "\"stream\": stream_key, \"stream_id\": stream_id, \"listeners\": self.count_listeners(), } ) users_woken_by_stream_counter.labels(stream_key).inc() with", "stream_key, \"stream_id\": stream_id, \"listeners\": self.count_listeners(), } ) users_woken_by_stream_counter.labels(stream_key).inc() with PreserveLoggingContext():", "Unless required by applicable law or agreed to in writing,", "We start it at the current token since if we", "since if we get any streams # that have a", "self.current_token = current_token # The last token for which we", ") users_woken_by_stream_counter.labels(stream_key).inc() with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) noify_deferred.callback(self.current_token) def remove(self,", "the specific language governing permissions and # limitations under the", "self.event_sources.get_current_token() if room_ids is None: room_ids = await self.store.get_rooms_for_user(user_id) user_stream", "func returns true.\"\"\" n = 0 for x in it:", "name == \"presence\": now = self.clock.time_msec() new_events[:] = [ {", "else: if ( entry.type == EventTypes.Member and entry.membership == Membership.JOIN", "stream_key: str, stream_id: Union[int, RoomStreamToken], time_now_ms: int, ): \"\"\"Notify any", "an event stream per user_id. # This is used by", "and # limitations under the License. 
import logging from collections", "( Awaitable, Callable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar,", "\"_PEEKING_%s_%s\" % ( explicit_room_id, user_id_for_stream, ) result = await self.wait_for_events(", "permissions and # limitations under the License. import logging from", "for what I have done. user_id_for_stream = \"_PEEKING_%s_%s\" % (", "self.store = hs.get_datastore() self.pending_new_room_events = [] # type: List[_PendingRoomEventEntry] #", "resolved when there is a new token greater than the", "is used by both /sync and /events. # We want", "event\") def _notify_app_services_ephemeral( self, stream_key: str, new_token: Union[int, RoomStreamToken], users:", "joined the room. \"\"\" if pagination_config.from_token: from_token = pagination_config.from_token else:", "(from_token, from_token)) events = [] # type: List[EventBase] end_token =", "current_token: StreamToken, time_now_ms: int, ): self.user_id = user_id self.rooms =", "listener.deferred = timeout_deferred( listener.deferred, (end_time - now) / 1000.0, self.hs.get_reactor(),", "in self.user_to_user_stream.values(): if stream.count_listeners(): continue if stream.last_notified_ms < expire_before_ts: expired_streams.append(stream)", "# most when scraping it. def count_listeners(): all_user_streams = set()", "time_now_ms: The current time in milliseconds. \"\"\" self.current_token = self.current_token.copy_and_advance(stream_key,", "_notify_app_services_ephemeral( self, stream_key: str, new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]]", "self, stream_key: str, new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] =", "or [] rooms = rooms or [] with Measure(self.clock, \"on_new_event\"):", "|= self.room_to_user_streams.get(room, set()) time_now_ms = self.clock.time_msec() for user_stream in user_streams:", "users=users, rooms=rooms, ) self._on_updated_room_token(max_room_stream_token) def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken): \"\"\"Poke services", "waiting for a previous event to be persisted. Args: max_room_stream_token:", "\"\"\" users = users or [] rooms = rooms or", "event_pos = attr.ib(type=PersistedEventPosition) extra_users = attr.ib(type=Collection[UserID]) room_id = attr.ib(type=str) type", "{ \"notify\": self.user_id, \"stream\": stream_key, \"stream_id\": stream_id, \"listeners\": self.count_listeners(), }", "current_token # The last token for which we should wake", ") -> EventStreamResult: if after_token == before_token: return EventStreamResult([], (from_token,", "= None, ): \"\"\"Used to inform listeners that something has", "List, Optional, Set, Tuple, TypeVar, Union, ) import attr from", "cb in self.replication_callbacks: cb() def notify_remote_server_up(self, server: str): \"\"\"Notify any", "\"\"\"Notify any listeners for this user of a new event", "any interested application service. self._notify_app_services(max_room_stream_token) self._notify_pusher_pool(max_room_stream_token) if self.federation_sender: self.federation_sender.notify_new_events(max_room_stream_token) def", "entry.state_key ): self._user_joined_room(entry.state_key, entry.room_id) users.update(entry.extra_users) rooms.add(entry.room_id) if users or rooms:", "available. Callback is not given any arguments. It should *not*", "we are streaming from, i.e. we shouldn't notify for things", "self.remote_server_up_callbacks = [] # type: List[Callable[[str], None]] self.clock = hs.get_clock()", "and rooms, return any new events for them. 
If there", "or [], ) def on_new_room_event_args( self, room_id: str, event_type: str,", "their last token. if self.last_notified_token != token: return _NotificationListener(defer.succeed(self.current_token)) else:", "int: return len(self.notify_deferred.observers()) def new_listener(self, token: StreamToken) -> _NotificationListener: \"\"\"Returns", "Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, rooms: Optional[Collection[str]] =", "were queued waiting for a previous event to be persisted.", "a background thread should be started and wrapped with run_as_background_process.", "clash with any real users) for keying peeking # over", "wake up stream if something has already since happened #", "__init__(self, deferred): self.deferred = deferred class _NotifierUserStream: \"\"\"This represents a", "_NotifierUserStream: \"\"\"This represents a user connected to the event stream.", "\"\"\"Add a callback that will be called when some new", "if users or rooms: self.on_new_event( \"room_key\", max_room_stream_token, users=users, rooms=rooms, )", "get poked. # We start it at the current token", "background thread should be started and wrapped with run_as_background_process. \"\"\"", "= [ { \"type\": \"m.presence\", \"content\": format_user_presence_state(event, now), } for", "it. This gets updated every time we get poked. #", "-> _NotificationListener: \"\"\"Returns a deferred that is resolved when there", "user_stream.notify(stream_key, new_token, time_now_ms) except Exception: logger.exception(\"Failed to notify listener\") self.notify_replication()", ") current_token = user_stream.current_token result = await callback(prev_token, current_token) log_kv(", "federation_sender directly rather than registering as a # callback as", "from synapse.logging.context import PreserveLoggingContext from synapse.logging.opentracing import log_kv, start_active_span from", "x in it: if func(x): n += 1 return n", "} ) if result: break # Update the prev_token to", "import ( Awaitable, Callable, Dict, Iterable, List, Optional, Set, Tuple,", "state.content: return ( state.content[\"history_visibility\"] == HistoryVisibility.WORLD_READABLE ) else: return False", "listener from all the indexes in the Notifier it knows", "= self.event_sources.get_current_token() if room_ids is None: room_ids = await self.store.get_rooms_for_user(user_id)", "class _NotifierUserStream: \"\"\"This represents a user connected to the event", "handler will have yielded to the deferred, so to notify", "attr from prometheus_client import Counter from twisted.internet import defer import", "now = self.clock.time_msec() if end_time <= now: break # Now", "the notifier keeps an event stream per user_id. # This", "non empty response or the timeout fires. \"\"\" user_stream =", "a new token. listener = user_stream.new_listener(prev_token) listener.deferred = timeout_deferred( listener.deferred,", "remove_expired_streams(self) -> None: time_now_ms = self.clock.time_msec() expired_streams = [] expire_before_ts", "for room in user_stream.rooms: s = self.room_to_user_streams.setdefault(room, set()) s.add(user_stream) def", "on_new_replication_data(self) -> None: \"\"\"Used to inform replication listeners that something", "-> None: \"\"\"Notify the any replication listeners that there's a", "independently of /sync, # without polluting its contents. So we", "the user has joined the room. \"\"\" if pagination_config.from_token: from_token", "is not given any arguments. 
It should *not* return a", "# type: Dict[str, Set[_NotifierUserStream]] self.hs = hs self.storage = hs.get_storage()", "will be called when some new data is available. Callback", "[] expire_before_ts = time_now_ms - self.UNUSED_STREAM_EXPIRY_MS for stream in self.user_to_user_stream.values():", "PreserveLoggingContext from synapse.logging.opentracing import log_kv, start_active_span from synapse.logging.utils import log_function", "The token from which we are streaming from, i.e. we", "\"\"\"Used by handlers to inform the notifier something has happened", "callback: Callable[[StreamToken, StreamToken], Awaitable[T]], room_ids=None, from_token=StreamToken.START, ) -> T: \"\"\"Wait", "60 * 1000 def __init__(self, hs: \"synapse.server.HomeServer\"): self.user_to_user_stream = {}", "all the indexes in the Notifier it knows about. \"\"\"", "before returning. If explicit_room_id is not set, the user's joined", "# since their last token. if self.last_notified_token != token: return", "def _notify_app_services_ephemeral( self, stream_key: str, new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str,", "users or [] rooms = rooms or [] with Measure(self.clock,", "You may obtain a copy of the License at #", "streams for stream in list(self.user_to_user_stream.values()): all_user_streams.add(stream) return sum(stream.count_listeners() for stream", "all_user_streams.add(stream) return sum(stream.count_listeners() for stream in all_user_streams) LaterGauge(\"synapse_notifier_listeners\", \"\", [],", ") -> EventStreamResult: \"\"\"For the given user and rooms, return", "them up. self.last_notified_token = current_token self.last_notified_ms = time_now_ms with PreserveLoggingContext():", "timeout or if the timeout had # already expired. current_token", "self.clock.looping_call( self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS ) # This is not a very", "Collection, PersistedEventPosition, RoomStreamToken, StreamToken, UserID, ) from synapse.util.async_helpers import ObservableDeferred,", "true.\"\"\" n = 0 for x in it: if func(x):", "after_token == before_token: return EventStreamResult([], (from_token, from_token)) events = []", "\"\"\" # Immediately wake up stream if something has already", "in self.replication_callbacks: cb() def notify_remote_server_up(self, server: str): \"\"\"Notify any replication", "synapse.events import EventBase from synapse.handlers.presence import format_user_presence_state from synapse.logging.context import", "cheap test to perform, but it's only executed # when", "event from an event source. Args: stream_key: The stream the", "which we should wake up any streams that have a", "entry.room_id) users.update(entry.extra_users) rooms.add(entry.room_id) if users or rooms: self.on_new_event( \"room_key\", max_room_stream_token,", "log_kv( { \"wait_for_events\": \"sleep\", \"token\": prev_token, } ) with PreserveLoggingContext():", "\"\"\" __slots__ = [\"deferred\"] def __init__(self, deferred): self.deferred = deferred", "(from_token, end_token)) user_id_for_stream = user.to_string() if is_peeking: # Internally, the", "self.last_notified_ms = time_now_ms noify_deferred = self.notify_deferred log_kv( { \"notify\": self.user_id,", "def get_events_for( self, user: UserID, pagination_config: PaginationConfig, timeout: int, is_guest:", "I have done. 
user_id_for_stream = \"_PEEKING_%s_%s\" % ( explicit_room_id, user_id_for_stream,", "} ) for user in users: user_stream = self.user_to_user_stream.get(str(user)) if", "+ timeout while not result: try: now = self.clock.time_msec() if", "is_peeking = not is_joined async def check_for_updates( before_token: StreamToken, after_token:", "replication that a remote server has come back up\"\"\" #", "notifier something has happened in the room, room event wise.", "Counter from twisted.internet import defer import synapse.server from synapse.api.constants import", "if stream.last_notified_ms < expire_before_ts: expired_streams.append(stream) for expired_stream in expired_streams: expired_stream.remove(self)", "_NotificationListener(defer.succeed(self.current_token)) else: return _NotificationListener(self.notify_deferred.observe()) class EventStreamResult(namedtuple(\"EventStreamResult\", (\"events\", \"tokens\"))): def __bool__(self):", "used by both /sync and /events. # We want /events", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "self.current_token.copy_and_advance(stream_key, stream_id) self.last_notified_token = self.current_token self.last_notified_ms = time_now_ms noify_deferred =", "attr.ib(type=Collection[UserID]) room_id = attr.ib(type=str) type = attr.ib(type=str) state_key = attr.ib(type=Optional[str])", "The highest stream_id below which all events have been persisted.", "has happened between the old prev_token and the current_token prev_token", "listening in so that it can remove itself from the", "any of the normal user event streams\"\"\" self.notify_replication() async def", "start_active_span from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from", "StreamToken) -> _NotificationListener: \"\"\"Returns a deferred that is resolved when", "): \"\"\"Used to inform listeners that something has happened event", "self.notify_replication() async def wait_for_events( self, user_id: str, timeout: int, callback:", "notifier.user_to_user_stream.pop(self.user_id) def count_listeners(self) -> int: return len(self.notify_deferred.observers()) def new_listener(self, token:", "idea whether they should be # woken up or not,", "it and b) it introduces # circular dependencies. if self.federation_sender:", ") with start_active_span(\"wait_for_events.deferred\"): log_kv( { \"wait_for_events\": \"sleep\", \"token\": prev_token, }", "bool(result), } ) if result: break # Update the prev_token", "user and rooms, return any new events for them. If", "keyname) if before_id == after_id: continue new_events, new_key = await", "\"on_new_event\"): user_streams = set() log_kv( { \"waking_up_explicit_users\": len(users), \"waking_up_explicit_rooms\": len(rooms),", "= users or [] rooms = rooms or [] with", "back online after having been # down. self.remote_server_up_callbacks = []", "len(self.user_to_user_stream) ) def add_replication_callback(self, cb: Callable[[], None]): \"\"\"Add a callback", "Optional[Collection[UserID]] = None, ): \"\"\"Used by handlers to inform the", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "users, ) def on_new_replication_data(self) -> None: \"\"\"Used to inform replication", "License. 
# You may obtain a copy of the License", "happened if there was no timeout or if the timeout", "None prev_token = from_token if timeout: end_time = self.clock.time_msec() +", "count_listeners(): all_user_streams = set() # type: Set[_NotifierUserStream] for streams in", "have come back online after having been # down. self.remote_server_up_callbacks", "RoomStreamToken, StreamToken, UserID, ) from synapse.util.async_helpers import ObservableDeferred, timeout_deferred from", "\"\"\"This class is responsible for notifying any listeners when there", "after having been # down. self.remote_server_up_callbacks = [] # type:", "new events to happen before returning. If explicit_room_id is not", "[], count_listeners) LaterGauge( \"synapse_notifier_rooms\", \"\", [], lambda: count(bool, list(self.room_to_user_streams.values())), )", "= set() log_kv( { \"waking_up_explicit_users\": len(users), \"waking_up_explicit_rooms\": len(rooms), } )", "event_type=event.type, state_key=event.get(\"state_key\"), membership=event.content.get(\"membership\"), max_room_stream_token=max_room_stream_token, extra_users=extra_users or [], ) def on_new_room_event_args(", "self.replication_callbacks = [] # type: List[Callable[[], None]] # Called when", "are streaming from, i.e. we shouldn't notify for things that", "self._on_updated_room_token(max_room_stream_token) def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken): \"\"\"Poke services that might care", "to inform replication listeners that something has happened without waking", "\"\"\" for room in self.rooms: lst = notifier.room_to_user_streams.get(room, set()) lst.discard(self)", "event source. Args: stream_key: The stream the event came from.", "} ) current_token = user_stream.current_token result = await callback(prev_token, current_token)", "break # Update the prev_token to the current_token since nothing", "given token. Args: token: The token from which we are", "before_id = getattr(before_token, keyname) after_id = getattr(after_token, keyname) if before_id", "start it at the current token since if we get", "defer.CancelledError: log_kv({\"wait_for_events\": \"cancelled\"}) break if result is None: # This", "= set() # type: Set[_NotifierUserStream] for streams in list(self.room_to_user_streams.values()): all_user_streams", "so lets just wake them up. self.last_notified_token = current_token self.last_notified_ms", "Optional[Collection[str]] = None, ): \"\"\"Used to inform listeners that something", "Membership.JOIN and entry.state_key ): self._user_joined_room(entry.state_key, entry.room_id) users.update(entry.extra_users) rooms.add(entry.room_id) if users", "self.user_to_user_stream.values(): if stream.count_listeners(): continue if stream.last_notified_ms < expire_before_ts: expired_streams.append(stream) for", "lambda: len(self.user_to_user_stream) ) def add_replication_callback(self, cb: Callable[[], None]): \"\"\"Add a", "a given point a user may have a number of", "Measure(self.clock, \"on_new_event\"): user_streams = set() log_kv( { \"waking_up_explicit_users\": len(users), \"waking_up_explicit_rooms\":", "from the indexes in the Notifier class. 
\"\"\" def __init__(", "attr.ib(type=str) type = attr.ib(type=str) state_key = attr.ib(type=Optional[str]) membership = attr.ib(type=Optional[str])", "= await self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if explicit_room_id in joined_room_ids: return", "we already have a reference to it and b) it", "rooms.add(entry.room_id) if users or rooms: self.on_new_event( \"room_key\", max_room_stream_token, users=users, rooms=rooms,", "on_new_event( self, stream_key: str, new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]]", "PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) noify_deferred.callback(self.current_token) def remove(self, notifier: \"Notifier\"): \"\"\"Remove", "def new_listener(self, token: StreamToken) -> _NotificationListener: \"\"\"Returns a deferred that", "[], lambda: count(bool, list(self.room_to_user_streams.values())), ) LaterGauge( \"synapse_notifier_users\", \"\", [], lambda:", "server has come back up\"\"\" # We call federation_sender directly", "@attr.s(slots=True, frozen=True) class _PendingRoomEventEntry: event_pos = attr.ib(type=PersistedEventPosition) extra_users = attr.ib(type=Collection[UserID])", "user_id_for_stream = \"_PEEKING_%s_%s\" % ( explicit_room_id, user_id_for_stream, ) result =", "HistoryVisibility, Membership from synapse.api.errors import AuthError from synapse.events import EventBase", "are new things to stream over replication self.replication_callbacks = []", "= current_token self.last_notified_ms = time_now_ms with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred())", "stream.count_listeners(): continue if stream.last_notified_ms < expire_before_ts: expired_streams.append(stream) for expired_stream in", "users in the `extra_users` param. The events can be peristed", "wise. Will wake up all listeners for the given users", "Membership from synapse.api.errors import AuthError from synapse.events import EventBase from", "be persisted. Args: max_room_stream_token: The highest stream_id below which all", "current_token prev_token = current_token except defer.TimeoutError: log_kv({\"wait_for_events\": \"timeout\"}) break except", "notify listener\") self.notify_replication() # Notify appservices self._notify_app_services_ephemeral( stream_key, new_token, users,", ") if state and \"history_visibility\" in state.content: return ( state.content[\"history_visibility\"]", "been persisted. \"\"\" pending = self.pending_new_room_events self.pending_new_room_events = [] users", "room position has been updated. \"\"\" # poke any interested", "something has already since happened # since their last token.", "state and \"history_visibility\" in state.content: return ( state.content[\"history_visibility\"] == HistoryVisibility.WORLD_READABLE", "the any replication listeners that there's a new event\"\"\" for", "= None, ): \"\"\"Unwraps event and calls `on_new_room_event_args`.\"\"\" self.on_new_room_event_args( event_pos=event_pos,", "will be polled for events. If explicit_room_id is set, that", "inform listeners that something has happened event wise. Will wake", "gets updated every time we get poked. # We start", "all events have been persisted. 
\"\"\" pending = self.pending_new_room_events self.pending_new_room_events", "self._notify_pusher_pool(max_room_stream_token) if self.federation_sender: self.federation_sender.notify_new_events(max_room_stream_token) def _notify_app_services(self, max_room_stream_token: RoomStreamToken): try: self.appservice_handler.notify_interested_services(max_room_stream_token)", "or rooms: self.on_new_event( \"room_key\", max_room_stream_token, users=users, rooms=rooms, ) self._on_updated_room_token(max_room_stream_token) def", "an illegal user ID # (which thus cannot clash with", "has come back up\"\"\" # We call federation_sender directly rather", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "[] ) except Exception: logger.exception(\"Error notifying application services of event\")", "Deferred - if it needs to do any asynchronous work,", "most when scraping it. def count_listeners(): all_user_streams = set() #", "can be peristed out of order. The notifier will wait", "we get any streams # that have a token from", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "room_id = attr.ib(type=str) type = attr.ib(type=str) state_key = attr.ib(type=Optional[str]) membership", "from prometheus_client import Counter from twisted.internet import defer import synapse.server", "explicit_room_id, user_id_for_stream, ) result = await self.wait_for_events( user_id_for_stream, timeout, check_for_updates,", "joined_room_ids = await self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if explicit_room_id in joined_room_ids:", "user_id. # This is used by both /sync and /events.", "= Counter( \"synapse_notifier_users_woken_by_stream\", \"\", [\"stream\"] ) T = TypeVar(\"T\") #", "\"presence\": now = self.clock.time_msec() new_events[:] = [ { \"type\": \"m.presence\",", "def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken): try: self._pusher_pool.on_new_notifications(max_room_stream_token) except Exception: logger.exception(\"Error pusher", "room_ids=None, from_token=StreamToken.START, ) -> T: \"\"\"Wait until the callback returns", "used from the /events stream. \"\"\" UNUSED_STREAM_EXPIRY_MS = 10 *", "if explicit_room_id in joined_room_ids: return [explicit_room_id], True if await self._is_world_readable(explicit_room_id):", "event stream. It tracks the most recent stream token for", "the callback returns a non empty response or the timeout", "required by applicable law or agreed to in writing, software", "return n class _NotificationListener: \"\"\"This represents a single client connection", "without waking up any of the normal user event streams\"\"\"", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "self.appservice_handler.notify_interested_services_ephemeral( stream_key, stream_token, users or [] ) except Exception: logger.exception(\"Error", "is a new token greater than the given token. Args:", "in self.rooms: lst = notifier.room_to_user_streams.get(room, set()) lst.discard(self) notifier.user_to_user_stream.pop(self.user_id) def count_listeners(self)", "the /events stream. \"\"\" UNUSED_STREAM_EXPIRY_MS = 10 * 60 *", "stream the event came from. 
time_now_ms: The current time in", "user_id self.rooms = set(rooms) self.current_token = current_token # The last", "wait for up to `timeout` milliseconds for any new events", "room_id: str): new_user_stream = self.user_to_user_stream.get(user_id) if new_user_stream is not None:", "that it can remove itself from the indexes in the", "has happened event wise. Will wake up all listeners for", "await source.get_new_events( user=user, from_key=getattr(from_token, keyname), limit=limit, is_guest=is_peeking, room_ids=room_ids, explicit_room_id=explicit_room_id, )", "end_token)) user_id_for_stream = user.to_string() if is_peeking: # Internally, the notifier", "agreed to in writing, software # distributed under the License", "asynchronous work, a background thread should be started and wrapped", "StreamToken], Awaitable[T]], room_ids=None, from_token=StreamToken.START, ) -> T: \"\"\"Wait until the", "not given any arguments. It should *not* return a Deferred", "distributed under the License is distributed on an \"AS IS\"", "= [\"deferred\"] def __init__(self, deferred): self.deferred = deferred class _NotifierUserStream:", "name, source in self.event_sources.sources.items(): keyname = \"%s_key\" % name before_id", "greater than the given token. Args: token: The token from", "type: Set[str] for entry in pending: if entry.event_pos.persisted_after(max_room_stream_token): self.pending_new_room_events.append(entry) else:", "): self._user_joined_room(entry.state_key, entry.room_id) users.update(entry.extra_users) rooms.add(entry.room_id) if users or rooms: self.on_new_event(", "notifying application services of event\") def _notify_app_services_ephemeral( self, stream_key: str,", "replication listeners that there's a new event\"\"\" for cb in", "already have a reference to it and b) it introduces", "# type: List[Callable[[], None]] # Called when remote servers have", "It should *not* return a Deferred - if it needs", "new event from an event source. Args: stream_key: The stream", "\"\"\"Notify for the room events that were queued waiting for", "await self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if explicit_room_id in joined_room_ids: return [explicit_room_id],", "milliseconds for any new events to happen before returning. If", "is a new token. listener = user_stream.new_listener(prev_token) listener.deferred = timeout_deferred(", "callback(prev_token, current_token) log_kv( { \"wait_for_events\": \"result\", \"result\": bool(result), } )", "\"room\": new_events = await filter_events_for_client( self.storage, user.to_string(), new_events, is_peeking=is_peeking, )", "PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] = None, ): \"\"\"Unwraps event", "Exception: logger.exception(\"Failed to notify listener\") self.notify_replication() # Notify appservices self._notify_app_services_ephemeral(", "end_time <= now: break # Now we wait for the", "joined rooms will be polled for events. 
If explicit_room_id is", "class _PendingRoomEventEntry: event_pos = attr.ib(type=PersistedEventPosition) extra_users = attr.ib(type=Collection[UserID]) room_id =", "result: try: now = self.clock.time_msec() if end_time <= now: break", "room_id: str) -> bool: state = await self.state_handler.get_current_state( room_id, EventTypes.RoomHistoryVisibility,", "return result async def get_events_for( self, user: UserID, pagination_config: PaginationConfig,", "normal user event streams\"\"\" self.notify_replication() async def wait_for_events( self, user_id:", "1000.0, self.hs.get_reactor(), ) with start_active_span(\"wait_for_events.deferred\"): log_kv( { \"wait_for_events\": \"sleep\", \"token\":", "Dict[str, Set[_NotifierUserStream]] self.hs = hs self.storage = hs.get_storage() self.event_sources =", "RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, rooms: Optional[Collection[str]] = None,", "# type: Set[str] for entry in pending: if entry.event_pos.persisted_after(max_room_stream_token): self.pending_new_room_events.append(entry)", "have done. user_id_for_stream = \"_PEEKING_%s_%s\" % ( explicit_room_id, user_id_for_stream, )", "a non empty response or the timeout fires. \"\"\" user_stream", "import AuthError from synapse.events import EventBase from synapse.handlers.presence import format_user_presence_state", "streams # that have a token from before we have", "self.federation_sender = None if hs.should_send_federation(): self.federation_sender = hs.get_federation_sender() self.state_handler =", "check_for_updates, room_ids=room_ids, from_token=from_token, ) return result async def _get_room_ids( self,", "we get poked. # We start it at the current", "= user_stream.current_token result = await callback(prev_token, current_token) return result async", "type: Set[_NotifierUserStream] for streams in list(self.room_to_user_streams.values()): all_user_streams |= streams for", "handlers to inform the notifier something has happened in the", "stream_id: Union[int, RoomStreamToken], time_now_ms: int, ): \"\"\"Notify any listeners for", "type: List[_PendingRoomEventEntry] # Called when there are new things to", "or the user has joined the room. \"\"\" if pagination_config.from_token:", "in list(self.user_to_user_stream.values()): all_user_streams.add(stream) return sum(stream.count_listeners() for stream in all_user_streams) LaterGauge(\"synapse_notifier_listeners\",", "for any new events to happen before returning. 
If explicit_room_id", "current_token) log_kv( { \"wait_for_events\": \"result\", \"result\": bool(result), } ) if", "self.clock = hs.get_clock() self.appservice_handler = hs.get_application_service_handler() self._pusher_pool = hs.get_pusherpool() self.federation_sender", "is not set, the user's joined rooms will be polled", "2014 - 2016 OpenMarket Ltd # # Licensed under the", "set()) s.add(user_stream) def _user_joined_room(self, user_id: str, room_id: str): new_user_stream =", "* 60 * 1000 def __init__(self, hs: \"synapse.server.HomeServer\"): self.user_to_user_stream =", "in it for which func returns true.\"\"\" n = 0", "on_new_room_event( self, event: EventBase, event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]]", "time_now_ms=self.clock.time_msec(), ) self._register_with_keys(user_stream) result = None prev_token = from_token if", "Args: token: The token from which we are streaming from,", "notify_remote_server_up(self, server: str): \"\"\"Notify any replication that a remote server", "TODO(paul): Should be shared somewhere def count(func: Callable[[T], bool], it:", "\"sleep\", \"token\": prev_token, } ) with PreserveLoggingContext(): await listener.deferred log_kv(", "up stream if something has already since happened # since", "= rooms or [] with Measure(self.clock, \"on_new_event\"): user_streams = set()", "before notifying the client streams. \"\"\" self.pending_new_room_events.append( _PendingRoomEventEntry( event_pos=event_pos, extra_users=extra_users", "joined_room_ids: return [explicit_room_id], True if await self._is_world_readable(explicit_room_id): return [explicit_room_id], False", "TypeVar, Union, ) import attr from prometheus_client import Counter from", "room_ids is None: room_ids = await self.store.get_rooms_for_user(user_id) user_stream = _NotifierUserStream(", "Now we wait for the _NotifierUserStream to be told there", "OR CONDITIONS OF ANY KIND, either express or implied. #", "if the timeout had # already expired. current_token = user_stream.current_token", "None: \"\"\"Notify the any replication listeners that there's a new", "the event stream. It tracks the most recent stream token", "Set[UserID] rooms = set() # type: Set[str] for entry in", "if timeout: end_time = self.clock.time_msec() + timeout while not result:", "the License is distributed on an \"AS IS\" BASIS, #", "stream token for that user. At a given point a", "logger.exception(\"Error notifying application services of event\") def _notify_app_services_ephemeral( self, stream_key:", "what I have done. 
user_id_for_stream = \"_PEEKING_%s_%s\" % ( explicit_room_id,", "def check_for_updates( before_token: StreamToken, after_token: StreamToken ) -> EventStreamResult: if", "/events to be used for peeking independently of /sync, #", "event\"\"\" for cb in self.replication_callbacks: cb() def notify_remote_server_up(self, server: str):", "allowed\") return joined_room_ids, True async def _is_world_readable(self, room_id: str) ->", "will wait until all previous events have been persisted before", "keyname = \"%s_key\" % name before_id = getattr(before_token, keyname) after_id", "self.UNUSED_STREAM_EXPIRY_MS ) # This is not a very cheap test", "before_token: StreamToken, after_token: StreamToken ) -> EventStreamResult: if after_token ==", "from synapse.events import EventBase from synapse.handlers.presence import format_user_presence_state from synapse.logging.context", "for user_stream in user_streams: try: user_stream.notify(stream_key, new_token, time_now_ms) except Exception:", "or [] ) except Exception: logger.exception(\"Error notifying application services of", "log_kv( { \"wait_for_events\": \"result\", \"result\": bool(result), } ) if result:", "This happened if there was no timeout or if the", "The current time in milliseconds. \"\"\" self.current_token = self.current_token.copy_and_advance(stream_key, stream_id)", "happened # since their last token. if self.last_notified_token != token:", "isinstance(new_token, int): stream_token = new_token self.appservice_handler.notify_interested_services_ephemeral( stream_key, stream_token, users or", "law or agreed to in writing, software # distributed under", "None, ): \"\"\"Unwraps event and calls `on_new_room_event_args`.\"\"\" self.on_new_room_event_args( event_pos=event_pos, room_id=event.room_id,", "may have a number of streams listening for events. This", "new token greater than the given token. Args: token: The", "user_streams.add(user_stream) for room in rooms: user_streams |= self.room_to_user_streams.get(room, set()) time_now_ms", "None: current_token = self.event_sources.get_current_token() if room_ids is None: room_ids =", "also keep track of which rooms it is listening in", "PreserveLoggingContext(): await listener.deferred log_kv( { \"wait_for_events\": \"woken\", \"token\": user_stream.current_token, }", "room will be polled for events only if it is", "time_now_ms with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) def notify( self, stream_key:", "== after_id: continue new_events, new_key = await source.get_new_events( user=user, from_key=getattr(from_token,", "is_joined async def check_for_updates( before_token: StreamToken, after_token: StreamToken ) ->", "to notify the handler it is sufficient to resolve the", "of event\") def on_new_event( self, stream_key: str, new_token: Union[int, RoomStreamToken],", "Set, Tuple, TypeVar, Union, ) import attr from prometheus_client import", "return False @log_function def remove_expired_streams(self) -> None: time_now_ms = self.clock.time_msec()", "any real users) for keying peeking # over /events. 
#", "import PaginationConfig from synapse.types import ( Collection, PersistedEventPosition, RoomStreamToken, StreamToken,", "from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.api.errors import AuthError", "else: return False @log_function def remove_expired_streams(self) -> None: time_now_ms =", "= from_token for name, source in self.event_sources.sources.items(): keyname = \"%s_key\"", "may obtain a copy of the License at # #", "given any arguments. It should *not* return a Deferred -", "the given user and rooms, return any new events for", "So we invent an illegal user ID # (which thus", "True async def _is_world_readable(self, room_id: str) -> bool: state =", "stream_id: The new id for the stream the event came", "a new event from an event source. Args: stream_key: The", "# type: List[Callable[[str], None]] self.clock = hs.get_clock() self.appservice_handler = hs.get_application_service_handler()", "= hs self.storage = hs.get_storage() self.event_sources = hs.get_event_sources() self.store =", "it: if func(x): n += 1 return n class _NotificationListener:", "# (which thus cannot clash with any real users) for", "RoomStreamToken, extra_users: Optional[Collection[UserID]] = None, ): \"\"\"Used by handlers to", "may not use this file except in compliance with the", "max_room_stream_token: The highest stream_id below which all events have been", "int, ): self.user_id = user_id self.rooms = set(rooms) self.current_token =", "= Counter(\"synapse_notifier_notified_events\", \"\") users_woken_by_stream_counter = Counter( \"synapse_notifier_users_woken_by_stream\", \"\", [\"stream\"] )", "this file except in compliance with the License. # You", "application services of event\") def _notify_app_services_ephemeral( self, stream_key: str, new_token:", "the notifier something has happened in the room, room event", "= notifier.room_to_user_streams.get(room, set()) lst.discard(self) notifier.user_to_user_stream.pop(self.user_id) def count_listeners(self) -> int: return", "This is not a very cheap test to perform, but", "return [explicit_room_id], True if await self._is_world_readable(explicit_room_id): return [explicit_room_id], False raise", "= await callback(prev_token, current_token) return result async def get_events_for( self,", "available for it. Primarily used from the /events stream. \"\"\"", "# # Licensed under the Apache License, Version 2.0 (the", "by both /sync and /events. # We want /events to", "# We call federation_sender directly rather than registering as a", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "# type: Set[_NotifierUserStream] for streams in list(self.room_to_user_streams.values()): all_user_streams |= streams", "start_active_span(\"wait_for_events.deferred\"): log_kv( { \"wait_for_events\": \"sleep\", \"token\": prev_token, } ) with", "peeking # over /events. # # I am sorry for", "listeners that something has happened event wise. Will wake up", "now: break # Now we wait for the _NotifierUserStream to", "= hs.get_application_service_handler() self._pusher_pool = hs.get_pusherpool() self.federation_sender = None if hs.should_send_federation():", "for a previous event to be persisted. Args: max_room_stream_token: The", "except Exception: logger.exception(\"Error pusher pool of event\") def on_new_event( self,", "StreamToken, time_now_ms: int, ): self.user_id = user_id self.rooms = set(rooms)", "The events can be peristed out of order. The notifier", "fires. 
\"\"\" user_stream = self.user_to_user_stream.get(user_id) if user_stream is None: current_token", "not result: try: now = self.clock.time_msec() if end_time <= now:", "\"\"\"Returns a deferred that is resolved when there is a", "self.pending_new_room_events = [] # type: List[_PendingRoomEventEntry] # Called when there", "(which thus cannot clash with any real users) for keying", "None, ): try: stream_token = None if isinstance(new_token, int): stream_token", "-> T: \"\"\"Wait until the callback returns a non empty", "max_room_stream_token: RoomStreamToken): try: self._pusher_pool.on_new_notifications(max_room_stream_token) except Exception: logger.exception(\"Error pusher pool of", "None if isinstance(new_token, int): stream_token = new_token self.appservice_handler.notify_interested_services_ephemeral( stream_key, stream_token,", "without polluting its contents. So we invent an illegal user", "event came from. stream_id: The new id for the stream", "of items in it for which func returns true.\"\"\" n", "\"\"\" UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000 def __init__(self,", "streams listening for events. This listener will also keep track", "the number of items in it for which func returns", "keyname) after_id = getattr(after_token, keyname) if before_id == after_id: continue", "type: Set[UserID] rooms = set() # type: Set[str] for entry", "current_token except defer.TimeoutError: log_kv({\"wait_for_events\": \"timeout\"}) break except defer.CancelledError: log_kv({\"wait_for_events\": \"cancelled\"})", "StreamToken, UserID, ) from synapse.util.async_helpers import ObservableDeferred, timeout_deferred from synapse.util.metrics", "new events for them. If there are no new events", "or not, so lets just wake them up. self.last_notified_token =", "invent an illegal user ID # (which thus cannot clash", "from synapse.util.async_helpers import ObservableDeferred, timeout_deferred from synapse.util.metrics import Measure from", "in milliseconds. \"\"\" self.current_token = self.current_token.copy_and_advance(stream_key, stream_id) self.last_notified_token = self.current_token", "= user_stream for room in user_stream.rooms: s = self.room_to_user_streams.setdefault(room, set())", "We call federation_sender directly rather than registering as a #", "happened in the room, room event wise. This triggers the", "self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS ) # This is not a very cheap", "= None if hs.should_send_federation(): self.federation_sender = hs.get_federation_sender() self.state_handler = hs.get_state_handler()", "The notifier will wait until all previous events have been", "for this user of a new event from an event", "room in self.rooms: lst = notifier.room_to_user_streams.get(room, set()) lst.discard(self) notifier.user_to_user_stream.pop(self.user_id) def", "count(bool, list(self.room_to_user_streams.values())), ) LaterGauge( \"synapse_notifier_users\", \"\", [], lambda: len(self.user_to_user_stream) )", "or implied. 
# See the License for the specific language", "except defer.CancelledError: log_kv({\"wait_for_events\": \"cancelled\"}) break if result is None: #", "new_user_stream is not None: room_streams = self.room_to_user_streams.setdefault(room_id, set()) room_streams.add(new_user_stream) new_user_stream.rooms.add(room_id)", "hs.get_federation_sender() self.state_handler = hs.get_state_handler() self.clock.looping_call( self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS ) # This", "in new_events ] events.extend(new_events) end_token = end_token.copy_and_replace(keyname, new_key) return EventStreamResult(events,", "of streams listening for events. This listener will also keep", "synapse.logging.opentracing import log_kv, start_active_span from synapse.logging.utils import log_function from synapse.metrics", "int, callback: Callable[[StreamToken, StreamToken], Awaitable[T]], room_ids=None, from_token=StreamToken.START, ) -> T:", "new token. listener = user_stream.new_listener(prev_token) listener.deferred = timeout_deferred( listener.deferred, (end_time", ") def add_replication_callback(self, cb: Callable[[], None]): \"\"\"Add a callback that", "# already expired. current_token = user_stream.current_token result = await callback(prev_token,", "max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] = None, ): \"\"\"Unwraps event and", "log_kv({\"wait_for_events\": \"timeout\"}) break except defer.CancelledError: log_kv({\"wait_for_events\": \"cancelled\"}) break if result", "-> int: \"\"\"Return the number of items in it for", "there are no new events wait for up to `timeout`", "Notifier: \"\"\"This class is responsible for notifying any listeners when", "None: time_now_ms = self.clock.time_msec() expired_streams = [] expire_before_ts = time_now_ms", "self.pending_new_room_events.append(entry) else: if ( entry.type == EventTypes.Member and entry.membership ==", "rooms: Collection[str], current_token: StreamToken, time_now_ms: int, ): self.user_id = user_id", "the room, room event wise. This triggers the notifier to", ") -> T: \"\"\"Wait until the callback returns a non", "any replication that a remote server has come back up\"\"\"", ") else: return False @log_function def remove_expired_streams(self) -> None: time_now_ms", "_notify_app_services(self, max_room_stream_token: RoomStreamToken): try: self.appservice_handler.notify_interested_services(max_room_stream_token) except Exception: logger.exception(\"Error notifying application", "# We start it at the current token since if", "services of event\") def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken): try: self._pusher_pool.on_new_notifications(max_room_stream_token) except", "entry.membership == Membership.JOIN and entry.state_key ): self._user_joined_room(entry.state_key, entry.room_id) users.update(entry.extra_users) rooms.add(entry.room_id)", "utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd #", "knows about. 
\"\"\" for room in self.rooms: lst = notifier.room_to_user_streams.get(room,", "list(self.user_to_user_stream.values()): all_user_streams.add(stream) return sum(stream.count_listeners() for stream in all_user_streams) LaterGauge(\"synapse_notifier_listeners\", \"\",", "self.current_token self.last_notified_ms = time_now_ms noify_deferred = self.notify_deferred log_kv( { \"notify\":", "streams in list(self.room_to_user_streams.values()): all_user_streams |= streams for stream in list(self.user_to_user_stream.values()):", "self, stream_key: str, stream_id: Union[int, RoomStreamToken], time_now_ms: int, ): \"\"\"Notify", "replication listeners that something has happened without waking up any", "{ \"type\": \"m.presence\", \"content\": format_user_presence_state(event, now), } for event in", "# The last token for which we should wake up", "await listener.deferred log_kv( { \"wait_for_events\": \"woken\", \"token\": user_stream.current_token, } )", "continue new_events, new_key = await source.get_new_events( user=user, from_key=getattr(from_token, keyname), limit=limit,", "event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] = None, ): \"\"\"Used", "= self.event_sources.get_current_token() limit = pagination_config.limit room_ids, is_joined = await self._get_room_ids(user,", "self.on_new_event( \"room_key\", max_room_stream_token, users=users, rooms=rooms, ) self._on_updated_room_token(max_room_stream_token) def _on_updated_room_token(self, max_room_stream_token:", "recent stream token for that user. At a given point", "[] # type: List[_PendingRoomEventEntry] # Called when there are new", "new_token self.appservice_handler.notify_interested_services_ephemeral( stream_key, stream_token, users or [] ) except Exception:", "str, new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, ):", "\"type\": \"m.presence\", \"content\": format_user_presence_state(event, now), } for event in new_events", "notifier will wait until all previous events have been persisted", "user_stream = self.user_to_user_stream.get(user_id) if user_stream is None: current_token = self.event_sources.get_current_token()", "the normal user event streams\"\"\" self.notify_replication() async def wait_for_events( self,", "def __init__(self, hs: \"synapse.server.HomeServer\"): self.user_to_user_stream = {} # type: Dict[str,", "= self.clock.time_msec() new_events[:] = [ { \"type\": \"m.presence\", \"content\": format_user_presence_state(event,", "returns true.\"\"\" n = 0 for x in it: if", "last token for which we should wake up any streams", "event: EventBase, event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] = None,", "# type: List[EventBase] end_token = from_token for name, source in", "self.storage, user.to_string(), new_events, is_peeking=is_peeking, ) elif name == \"presence\": now", "might care that the room position has been updated. \"\"\"", "def _notify_app_services(self, max_room_stream_token: RoomStreamToken): try: self.appservice_handler.notify_interested_services(max_room_stream_token) except Exception: logger.exception(\"Error notifying", "s.add(user_stream) def _user_joined_room(self, user_id: str, room_id: str): new_user_stream = self.user_to_user_stream.get(user_id)", "from collections import namedtuple from typing import ( Awaitable, Callable,", "time in milliseconds. 
\"\"\" self.current_token = self.current_token.copy_and_advance(stream_key, stream_id) self.last_notified_token =", "or [] with Measure(self.clock, \"on_new_event\"): user_streams = set() log_kv( {", "there's a new event\"\"\" for cb in self.replication_callbacks: cb() def", "triggers the notifier to wake up any listeners that are", "self.federation_sender.notify_new_events(max_room_stream_token) def _notify_app_services(self, max_room_stream_token: RoomStreamToken): try: self.appservice_handler.notify_interested_services(max_room_stream_token) except Exception: logger.exception(\"Error", "before_id == after_id: continue new_events, new_key = await source.get_new_events( user=user,", "pagination_config.from_token: from_token = pagination_config.from_token else: from_token = self.event_sources.get_current_token() limit =", "/events stream. \"\"\" UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000", "int, ): \"\"\"Notify any listeners for this user of a", "Awaitable, Callable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union,", "log_kv( { \"wait_for_events\": \"woken\", \"token\": user_stream.current_token, } ) current_token =", "self.user_to_user_stream = {} # type: Dict[str, _NotifierUserStream] self.room_to_user_streams = {}", "extra_users=extra_users or [], room_id=room_id, type=event_type, state_key=state_key, membership=membership, ) ) self._notify_pending_new_room_events(max_room_stream_token)", "log_kv( { \"notify\": self.user_id, \"stream\": stream_key, \"stream_id\": stream_id, \"listeners\": self.count_listeners(),", "\"\"\" user_stream = self.user_to_user_stream.get(user_id) if user_stream is None: current_token =", "result is None: # This happened if there was no", "user_streams |= self.room_to_user_streams.get(room, set()) time_now_ms = self.clock.time_msec() for user_stream in", "is sufficient to resolve the deferred. \"\"\" __slots__ = [\"deferred\"]", "* 1000 def __init__(self, hs: \"synapse.server.HomeServer\"): self.user_to_user_stream = {} #", "EventBase, event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] = None, ):", "def notify( self, stream_key: str, stream_id: Union[int, RoomStreamToken], time_now_ms: int,", "for the given users and rooms. \"\"\" users = users", "!= token: return _NotificationListener(defer.succeed(self.current_token)) else: return _NotificationListener(self.notify_deferred.observe()) class EventStreamResult(namedtuple(\"EventStreamResult\", (\"events\",", "world readable or the user has joined the room. \"\"\"", "at # most when scraping it. def count_listeners(): all_user_streams =", "be polled for events only if it is world readable", "wake up all listeners for the given users and rooms.", "expired_stream.remove(self) @log_function def _register_with_keys(self, user_stream: _NotifierUserStream): self.user_to_user_stream[user_stream.user_id] = user_stream for", "def __bool__(self): return bool(self.events) @attr.s(slots=True, frozen=True) class _PendingRoomEventEntry: event_pos =", "List[EventBase] end_token = from_token for name, source in self.event_sources.sources.items(): keyname", "for room in rooms: user_streams |= self.room_to_user_streams.get(room, set()) time_now_ms =", "self._get_room_ids(user, explicit_room_id) is_peeking = not is_joined async def check_for_updates( before_token:", "likely once per minute at # most when scraping it.", "def _user_joined_room(self, user_id: str, room_id: str): new_user_stream = self.user_to_user_stream.get(user_id) if", "resolve the deferred. 
\"\"\" __slots__ = [\"deferred\"] def __init__(self, deferred):", "= user.to_string() if is_peeking: # Internally, the notifier keeps an", "sufficient to resolve the deferred. \"\"\" __slots__ = [\"deferred\"] def", "with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) def notify( self, stream_key: str,", "when some new data is available. Callback is not given", "This triggers the notifier to wake up any listeners that", "number of items in it for which func returns true.\"\"\"", "\"wait_for_events\": \"sleep\", \"token\": prev_token, } ) with PreserveLoggingContext(): await listener.deferred", "servers have come back online after having been # down.", "comes before it. This gets updated every time we get", "get_events_for( self, user: UserID, pagination_config: PaginationConfig, timeout: int, is_guest: bool", "from which we are streaming from, i.e. we shouldn't notify", "\"\"\" # poke any interested application service. self._notify_app_services(max_room_stream_token) self._notify_pusher_pool(max_room_stream_token) if", "users_woken_by_stream_counter = Counter( \"synapse_notifier_users_woken_by_stream\", \"\", [\"stream\"] ) T = TypeVar(\"T\")", "PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] = None, ): \"\"\"Used by", "for which func returns true.\"\"\" n = 0 for x", "there # is a new token. listener = user_stream.new_listener(prev_token) listener.deferred", "minute at # most when scraping it. def count_listeners(): all_user_streams", "\"Notifier\"): \"\"\"Remove this listener from all the indexes in the", "ID # (which thus cannot clash with any real users)", "[], room_id=room_id, type=event_type, state_key=state_key, membership=membership, ) ) self._notify_pending_new_room_events(max_room_stream_token) self.notify_replication() def", "class EventStreamResult(namedtuple(\"EventStreamResult\", (\"events\", \"tokens\"))): def __bool__(self): return bool(self.events) @attr.s(slots=True, frozen=True)", "time_now_ms noify_deferred = self.notify_deferred log_kv( { \"notify\": self.user_id, \"stream\": stream_key,", "= set() # type: Set[UserID] rooms = set() # type:", ") elif name == \"presence\": now = self.clock.time_msec() new_events[:] =", "extra_users: Optional[Collection[UserID]] = None, ): \"\"\"Unwraps event and calls `on_new_room_event_args`.\"\"\"", "# Immediately wake up stream if something has already since", "= self.user_to_user_stream.get(user_id) if user_stream is None: current_token = self.event_sources.get_current_token() if", "deferred): self.deferred = deferred class _NotifierUserStream: \"\"\"This represents a user", "user connected to the event stream. It tracks the most", "self.room_to_user_streams.get(room, set()) time_now_ms = self.clock.time_msec() for user_stream in user_streams: try:", "in writing, software # distributed under the License is distributed", "def on_new_replication_data(self) -> None: \"\"\"Used to inform replication listeners that", "= None, ): \"\"\"Used by handlers to inform the notifier", "and /events. # We want /events to be used for", "if explicit_room_id: if explicit_room_id in joined_room_ids: return [explicit_room_id], True if", "return EventStreamResult(events, (from_token, end_token)) user_id_for_stream = user.to_string() if is_peeking: #", "room event wise. 


class _NotifierUserStream:
    """This represents a user connected to the event stream.

    It tracks the most recent stream token for that user. At a given point a
    user may have a number of streams listening for events.

    This listener will also keep track of which rooms it is listening in so
    that it can remove itself from the indexes in the Notifier class.
    """

    def __init__(
        self,
        user_id: str,
        rooms: Collection[str],
        current_token: StreamToken,
        time_now_ms: int,
    ):
        self.user_id = user_id
        self.rooms = set(rooms)
        self.current_token = current_token

        # The last token for which we should wake up any streams that have a
        # token that comes before it. This gets updated every time we get
        # poked. We start it at the current token since if we get any streams
        # that have a token from before, we have no idea whether they should
        # be woken up or not, so lets just wake them up.
        self.last_notified_token = current_token
        self.last_notified_ms = time_now_ms

        with PreserveLoggingContext():
            self.notify_deferred = ObservableDeferred(defer.Deferred())

    def notify(
        self,
        stream_key: str,
        stream_id: Union[int, RoomStreamToken],
        time_now_ms: int,
    ):
        """Notify any listeners for this user of a new event from an
        event source.

        Args:
            stream_key: The stream the event came from.
            stream_id: The new id for the stream the event came from.
            time_now_ms: The current time in milliseconds.
        """
        self.current_token = self.current_token.copy_and_advance(stream_key, stream_id)
        self.last_notified_token = self.current_token
        self.last_notified_ms = time_now_ms
        notify_deferred = self.notify_deferred

        log_kv(
            {
                "notify": self.user_id,
                "stream": stream_key,
                "stream_id": stream_id,
                "listeners": self.count_listeners(),
            }
        )

        users_woken_by_stream_counter.labels(stream_key).inc()

        with PreserveLoggingContext():
            self.notify_deferred = ObservableDeferred(defer.Deferred())
            notify_deferred.callback(self.current_token)

    def remove(self, notifier: "Notifier"):
        """Remove this listener from all the indexes in the Notifier
        it knows about.
        """
        for room in self.rooms:
            lst = notifier.room_to_user_streams.get(room, set())
            lst.discard(self)

        notifier.user_to_user_stream.pop(self.user_id)

    def count_listeners(self) -> int:
        return len(self.notify_deferred.observers())

    def new_listener(self, token: StreamToken) -> _NotificationListener:
        """Returns a deferred that is resolved when there is a new token
        greater than the given token.

        Args:
            token: The token from which we are streaming from, i.e. we
                shouldn't notify for things that happened before this.
        """
        # Immediately wake up the stream if something has already happened
        # since its last token.
        if self.last_notified_token != token:
            return _NotificationListener(defer.succeed(self.current_token))
        else:
            return _NotificationListener(self.notify_deferred.observe())
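

# A minimal sketch of the wake-up mechanism used above, kept as a comment so
# it does not execute on import. It assumes only what notify() itself relies
# on: that ObservableDeferred hands out an independent Deferred from each
# observe() call and forwards callback() to the wrapped Deferred.
#
#     from twisted.internet import defer
#     from synapse.util.async_helpers import ObservableDeferred
#
#     notify_deferred = ObservableDeferred(defer.Deferred())
#
#     # Each waiting request observes the shared deferred independently.
#     listener_a = notify_deferred.observe()
#     listener_b = notify_deferred.observe()
#     listener_a.addCallback(lambda token: print("A woken with", token))
#     listener_b.addCallback(lambda token: print("B woken with", token))
#
#     # Resolving the shared deferred wakes every observer with the token.
#     notify_deferred.callback("new-stream-token")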


class EventStreamResult(namedtuple("EventStreamResult", ("events", "tokens"))):
    def __bool__(self):
        return bool(self.events)


@attr.s(slots=True, frozen=True)
class _PendingRoomEventEntry:
    event_pos = attr.ib(type=PersistedEventPosition)
    extra_users = attr.ib(type=Collection[UserID])

    room_id = attr.ib(type=str)
    type = attr.ib(type=str)
    state_key = attr.ib(type=Optional[str])
    membership = attr.ib(type=Optional[str])


class Notifier:
    """This class is responsible for notifying any listeners when there are
    new events available for them.

    Primarily used from the /events stream.
    """

    UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000

    def __init__(self, hs: "synapse.server.HomeServer"):
        self.user_to_user_stream = {}  # type: Dict[str, _NotifierUserStream]
        self.room_to_user_streams = {}  # type: Dict[str, Set[_NotifierUserStream]]

        self.hs = hs
        self.storage = hs.get_storage()
        self.event_sources = hs.get_event_sources()
        self.store = hs.get_datastore()
        self.pending_new_room_events = []  # type: List[_PendingRoomEventEntry]

        # Called when there are new things to stream over replication
        self.replication_callbacks = []  # type: List[Callable[[], None]]

        # Called when remote servers have come back online after having been
        # down.
        self.remote_server_up_callbacks = []  # type: List[Callable[[str], None]]

        self.clock = hs.get_clock()
        self.appservice_handler = hs.get_application_service_handler()
        self._pusher_pool = hs.get_pusherpool()

        self.federation_sender = None
        if hs.should_send_federation():
            self.federation_sender = hs.get_federation_sender()

        self.state_handler = hs.get_state_handler()

        self.clock.looping_call(
            self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS
        )

        # This is not a very cheap test to perform, but it's only executed
        # when rendering the metrics page, which is likely once per minute at
        # most when scraping it.
        def count_listeners():
            all_user_streams = set()  # type: Set[_NotifierUserStream]

            for streams in list(self.room_to_user_streams.values()):
                all_user_streams |= streams
            for stream in list(self.user_to_user_stream.values()):
                all_user_streams.add(stream)

            return sum(stream.count_listeners() for stream in all_user_streams)

        LaterGauge("synapse_notifier_listeners", "", [], count_listeners)

        LaterGauge(
            "synapse_notifier_rooms",
            "",
            [],
            lambda: count(bool, list(self.room_to_user_streams.values())),
        )
        LaterGauge(
            "synapse_notifier_users", "", [], lambda: len(self.user_to_user_stream)
        )

    def add_replication_callback(self, cb: Callable[[], None]):
        """Add a callback that will be called when some new data is
        available. The callback is not given any arguments. It should *not*
        return a Deferred - if it needs to do any asynchronous work, a
        background thread should be started and wrapped with
        run_as_background_process.
        """
        self.replication_callbacks.append(cb)

    def on_new_room_event(
        self,
        event: EventBase,
        event_pos: PersistedEventPosition,
        max_room_stream_token: RoomStreamToken,
        extra_users: Optional[Collection[UserID]] = None,
    ):
        """Unwraps event and calls `on_new_room_event_args`."""
        self.on_new_room_event_args(
            event_pos=event_pos,
            room_id=event.room_id,
            event_type=event.type,
            state_key=event.get("state_key"),
            membership=event.content.get("membership"),
            max_room_stream_token=max_room_stream_token,
            extra_users=extra_users or [],
        )

    def on_new_room_event_args(
        self,
        room_id: str,
        event_type: str,
        state_key: Optional[str],
        membership: Optional[str],
        event_pos: PersistedEventPosition,
        max_room_stream_token: RoomStreamToken,
        extra_users: Optional[Collection[UserID]] = None,
    ):
        """Used by handlers to inform the notifier something has happened
        in the room, room event wise.

        This triggers the notifier to wake up any listeners that are
        listening to the room, and any listeners for the users in the
        `extra_users` param.

        The events can be persisted out of order. The notifier will wait
        until all previous events have been persisted before notifying the
        client streams.
        """
        self.pending_new_room_events.append(
            _PendingRoomEventEntry(
                event_pos=event_pos,
                extra_users=extra_users or [],
                room_id=room_id,
                type=event_type,
                state_key=state_key,
                membership=membership,
            )
        )
        self._notify_pending_new_room_events(max_room_stream_token)

        self.notify_replication()

    def _notify_pending_new_room_events(self, max_room_stream_token: RoomStreamToken):
        """Notify for the room events that were queued waiting for a
        previous event to be persisted.

        Args:
            max_room_stream_token: The highest stream_id below which all
                events have been persisted.
        """
        pending = self.pending_new_room_events
        self.pending_new_room_events = []

        users = set()  # type: Set[UserID]
        rooms = set()  # type: Set[str]

        for entry in pending:
            if entry.event_pos.persisted_after(max_room_stream_token):
                self.pending_new_room_events.append(entry)
            else:
                if (
                    entry.type == EventTypes.Member
                    and entry.membership == Membership.JOIN
                    and entry.state_key
                ):
                    self._user_joined_room(entry.state_key, entry.room_id)

                users.update(entry.extra_users)
                rooms.add(entry.room_id)

        if users or rooms:
            self.on_new_event(
                "room_key",
                max_room_stream_token,
                users=users,
                rooms=rooms,
            )
            self._on_updated_room_token(max_room_stream_token)

    def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken):
        """Poke services that might care that the room position has been
        updated.
        """
        # poke any interested application service.
        self._notify_app_services(max_room_stream_token)
        self._notify_pusher_pool(max_room_stream_token)

        if self.federation_sender:
            self.federation_sender.notify_new_events(max_room_stream_token)

    def _notify_app_services(self, max_room_stream_token: RoomStreamToken):
        try:
            self.appservice_handler.notify_interested_services(max_room_stream_token)
        except Exception:
            logger.exception("Error notifying application services of event")

    def _notify_app_services_ephemeral(
        self,
        stream_key: str,
        new_token: Union[int, RoomStreamToken],
        users: Optional[Collection[Union[str, UserID]]] = None,
    ):
        try:
            stream_token = None
            if isinstance(new_token, int):
                stream_token = new_token
            self.appservice_handler.notify_interested_services_ephemeral(
                stream_key, stream_token, users or []
            )
        except Exception:
            logger.exception("Error notifying application services of event")

    def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken):
        try:
            self._pusher_pool.on_new_notifications(max_room_stream_token)
        except Exception:
            logger.exception("Error pusher pool of event")

    def on_new_event(
        self,
        stream_key: str,
        new_token: Union[int, RoomStreamToken],
        users: Optional[Collection[Union[str, UserID]]] = None,
        rooms: Optional[Collection[str]] = None,
    ):
        """Used to inform listeners that something has happened event wise.

        Will wake up all listeners for the given users and rooms.
        """
        users = users or []
        rooms = rooms or []

        with Measure(self.clock, "on_new_event"):
            user_streams = set()

            log_kv(
                {
                    "waking_up_explicit_users": len(users),
                    "waking_up_explicit_rooms": len(rooms),
                }
            )

            for user in users:
                user_stream = self.user_to_user_stream.get(str(user))
                if user_stream is not None:
                    user_streams.add(user_stream)

            for room in rooms:
                user_streams |= self.room_to_user_streams.get(room, set())

            time_now_ms = self.clock.time_msec()
            for user_stream in user_streams:
                try:
                    user_stream.notify(stream_key, new_token, time_now_ms)
                except Exception:
                    logger.exception("Failed to notify listener")

            self.notify_replication()

            # Notify appservices
            self._notify_app_services_ephemeral(
                stream_key,
                new_token,
                users,
            )

    def on_new_replication_data(self) -> None:
        """Used to inform replication listeners that something has happened
        without waking up any of the normal user event streams"""
        self.notify_replication()

    async def wait_for_events(
        self,
        user_id: str,
        timeout: int,
        callback: Callable[[StreamToken, StreamToken], Awaitable[T]],
        room_ids=None,
        from_token=StreamToken.START,
    ) -> T:
        """Wait until the callback returns a non empty response or the
        timeout fires.
        """
        user_stream = self.user_to_user_stream.get(user_id)
        if user_stream is None:
            current_token = self.event_sources.get_current_token()
            if room_ids is None:
                room_ids = await self.store.get_rooms_for_user(user_id)
            user_stream = _NotifierUserStream(
                user_id=user_id,
                rooms=room_ids,
                current_token=current_token,
                time_now_ms=self.clock.time_msec(),
            )
            self._register_with_keys(user_stream)

        result = None
        prev_token = from_token
        if timeout:
            end_time = self.clock.time_msec() + timeout

            while not result:
                try:
                    now = self.clock.time_msec()
                    if end_time <= now:
                        break

                    # Now we wait for the _NotifierUserStream to be told there
                    # is a new token.
                    listener = user_stream.new_listener(prev_token)
                    listener.deferred = timeout_deferred(
                        listener.deferred,
                        (end_time - now) / 1000.0,
                        self.hs.get_reactor(),
                    )

                    with start_active_span("wait_for_events.deferred"):
                        log_kv(
                            {
                                "wait_for_events": "sleep",
                                "token": prev_token,
                            }
                        )

                        with PreserveLoggingContext():
                            await listener.deferred

                        log_kv(
                            {
                                "wait_for_events": "woken",
                                "token": user_stream.current_token,
                            }
                        )

                    current_token = user_stream.current_token

                    result = await callback(prev_token, current_token)
                    log_kv(
                        {
                            "wait_for_events": "result",
                            "result": bool(result),
                        }
                    )
                    if result:
                        break

                    # Update the prev_token to the current_token since nothing
                    # has happened between the old prev_token and the
                    # current_token.
                    prev_token = current_token
                except defer.TimeoutError:
                    log_kv({"wait_for_events": "timeout"})
                    break
                except defer.CancelledError:
                    log_kv({"wait_for_events": "cancelled"})
                    break

        if result is None:
            # This happens if there was no timeout or if the timeout had
            # already expired.
            current_token = user_stream.current_token
            result = await callback(prev_token, current_token)

        return result

    async def get_events_for(
        self,
        user: UserID,
        pagination_config: PaginationConfig,
        timeout: int,
        is_guest: bool = False,
        explicit_room_id: Optional[str] = None,
    ) -> EventStreamResult:
        """For the given user and rooms, return any new events for them. If
        there are no new events, wait for up to `timeout` milliseconds for
        any new events to happen before returning.

        If explicit_room_id is not set, the user's joined rooms will be
        polled for events.
        If explicit_room_id is set, that room will be polled for events only
        if it is world readable or the user has joined the room.
        """
        if pagination_config.from_token:
            from_token = pagination_config.from_token
        else:
            from_token = self.event_sources.get_current_token()

        limit = pagination_config.limit

        room_ids, is_joined = await self._get_room_ids(user, explicit_room_id)
        is_peeking = not is_joined

        async def check_for_updates(
            before_token: StreamToken, after_token: StreamToken
        ) -> EventStreamResult:
            if after_token == before_token:
                return EventStreamResult([], (from_token, from_token))

            events = []  # type: List[EventBase]
            end_token = from_token

            for name, source in self.event_sources.sources.items():
                keyname = "%s_key" % name
                before_id = getattr(before_token, keyname)
                after_id = getattr(after_token, keyname)
                if before_id == after_id:
                    continue

                new_events, new_key = await source.get_new_events(
                    user=user,
                    from_key=getattr(from_token, keyname),
                    limit=limit,
                    is_guest=is_peeking,
                    room_ids=room_ids,
                    explicit_room_id=explicit_room_id,
                )

                if name == "room":
                    new_events = await filter_events_for_client(
                        self.storage,
                        user.to_string(),
                        new_events,
                        is_peeking=is_peeking,
                    )
                elif name == "presence":
                    now = self.clock.time_msec()
                    new_events[:] = [
                        {
                            "type": "m.presence",
                            "content": format_user_presence_state(event, now),
                        }
                        for event in new_events
                    ]

                events.extend(new_events)
                end_token = end_token.copy_and_replace(keyname, new_key)

            return EventStreamResult(events, (from_token, end_token))

        user_id_for_stream = user.to_string()
        if is_peeking:
            # Internally, the notifier keeps an event stream per user_id.
            # This is used by both /sync and /events.
            # We want /events to be used for peeking independently of /sync,
            # without polluting its contents. So we invent an illegal user ID
            # (which thus cannot clash with any real users) for keying peeking
            # over /events.
            #
            # I am sorry for what I have done.
            user_id_for_stream = "_PEEKING_%s_%s" % (
                explicit_room_id,
                user_id_for_stream,
            )

        result = await self.wait_for_events(
            user_id_for_stream,
            timeout,
            check_for_updates,
            room_ids=room_ids,
            from_token=from_token,
        )

        return result

    async def _get_room_ids(
        self, user: UserID, explicit_room_id: Optional[str]
    ) -> Tuple[Collection[str], bool]:
        joined_room_ids = await self.store.get_rooms_for_user(user.to_string())
        if explicit_room_id:
            if explicit_room_id in joined_room_ids:
                return [explicit_room_id], True
            if await self._is_world_readable(explicit_room_id):
                return [explicit_room_id], False
            raise AuthError(403, "Non-joined access not allowed")
        return joined_room_ids, True

    async def _is_world_readable(self, room_id: str) -> bool:
        state = await self.state_handler.get_current_state(
            room_id, EventTypes.RoomHistoryVisibility, ""
        )
        if state and "history_visibility" in state.content:
            return (
                state.content["history_visibility"] == HistoryVisibility.WORLD_READABLE
            )
        else:
            return False
\"\"\" users =", "== EventTypes.Member and entry.membership == Membership.JOIN and entry.state_key ): self._user_joined_room(entry.state_key,", "explicit_room_id is not set, the user's joined rooms will be", "Exception: logger.exception(\"Error pusher pool of event\") def on_new_event( self, stream_key:", "await filter_events_for_client( self.storage, user.to_string(), new_events, is_peeking=is_peeking, ) elif name ==", "= self.notify_deferred log_kv( { \"notify\": self.user_id, \"stream\": stream_key, \"stream_id\": stream_id,", "|= streams for stream in list(self.user_to_user_stream.values()): all_user_streams.add(stream) return sum(stream.count_listeners() for", "user_id: str, room_id: str): new_user_stream = self.user_to_user_stream.get(user_id) if new_user_stream is", "there are new events available for it. Primarily used from", "event stream per user_id. # This is used by both", "from an event source. Args: stream_key: The stream the event", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "listener will also keep track of which rooms it is", "\"\", [], count_listeners) LaterGauge( \"synapse_notifier_rooms\", \"\", [], lambda: count(bool, list(self.room_to_user_streams.values())),", "if something has already since happened # since their last", "lst = notifier.room_to_user_streams.get(room, set()) lst.discard(self) notifier.user_to_user_stream.pop(self.user_id) def count_listeners(self) -> int:", "None]] # Called when remote servers have come back online", "updated. \"\"\" # poke any interested application service. self._notify_app_services(max_room_stream_token) self._notify_pusher_pool(max_room_stream_token)", "= ObservableDeferred(defer.Deferred()) def notify( self, stream_key: str, stream_id: Union[int, RoomStreamToken],", "\"synapse.server.HomeServer\"): self.user_to_user_stream = {} # type: Dict[str, _NotifierUserStream] self.room_to_user_streams =", "below which all events have been persisted. \"\"\" pending =", "def count_listeners(self) -> int: return len(self.notify_deferred.observers()) def new_listener(self, token: StreamToken)", "set() # type: Set[str] for entry in pending: if entry.event_pos.persisted_after(max_room_stream_token):", "T = TypeVar(\"T\") # TODO(paul): Should be shared somewhere def", "Set[_NotifierUserStream] for streams in list(self.room_to_user_streams.values()): all_user_streams |= streams for stream", "result = None prev_token = from_token if timeout: end_time =", "result async def get_events_for( self, user: UserID, pagination_config: PaginationConfig, timeout:", "has happened without waking up any of the normal user", "be used for peeking independently of /sync, # without polluting", "timeout: end_time = self.clock.time_msec() + timeout while not result: try:", "False raise AuthError(403, \"Non-joined access not allowed\") return joined_room_ids, True", "to inform listeners that something has happened event wise. Will", "source in self.event_sources.sources.items(): keyname = \"%s_key\" % name before_id =", "if there was no timeout or if the timeout had", "thread should be started and wrapped with run_as_background_process. 
\"\"\" self.replication_callbacks.append(cb)", ") self._on_updated_room_token(max_room_stream_token) def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken): \"\"\"Poke services that might", "rooms or [] with Measure(self.clock, \"on_new_event\"): user_streams = set() log_kv(", "have yielded to the deferred, so to notify the handler", "prev_token to the current_token since nothing # has happened between", "= await self.wait_for_events( user_id_for_stream, timeout, check_for_updates, room_ids=room_ids, from_token=from_token, ) return", "for stream in self.user_to_user_stream.values(): if stream.count_listeners(): continue if stream.last_notified_ms <", "try: self.appservice_handler.notify_interested_services(max_room_stream_token) except Exception: logger.exception(\"Error notifying application services of event\")", "This gets updated every time we get poked. # We", "\"m.presence\", \"content\": format_user_presence_state(event, now), } for event in new_events ]", "current_token = user_stream.current_token result = await callback(prev_token, current_token) log_kv( {", "user_streams = set() log_kv( { \"waking_up_explicit_users\": len(users), \"waking_up_explicit_rooms\": len(rooms), }", "# I am sorry for what I have done. user_id_for_stream", "= \"_PEEKING_%s_%s\" % ( explicit_room_id, user_id_for_stream, ) result = await", "expire_before_ts: expired_streams.append(stream) for expired_stream in expired_streams: expired_stream.remove(self) @log_function def _register_with_keys(self,", "for events only if it is world readable or the", "0 for x in it: if func(x): n += 1", "RoomStreamToken): \"\"\"Notify for the room events that were queued waiting", "/events. # We want /events to be used for peeking", "over replication self.replication_callbacks = [] # type: List[Callable[[], None]] #", "def _get_room_ids( self, user: UserID, explicit_room_id: Optional[str] ) -> Tuple[Collection[str],", "peeking independently of /sync, # without polluting its contents. So", "await callback(prev_token, current_token) log_kv( { \"wait_for_events\": \"result\", \"result\": bool(result), }", "deferred. \"\"\" __slots__ = [\"deferred\"] def __init__(self, deferred): self.deferred =", "callback that will be called when some new data is", "on_new_room_event_args( self, room_id: str, event_type: str, state_key: Optional[str], membership: Optional[str],", "that were queued waiting for a previous event to be", "import Counter from twisted.internet import defer import synapse.server from synapse.api.constants", "token for that user. At a given point a user", "came from. time_now_ms: The current time in milliseconds. 
\"\"\" self.current_token", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "notified_events_counter = Counter(\"synapse_notifier_notified_events\", \"\") users_woken_by_stream_counter = Counter( \"synapse_notifier_users_woken_by_stream\", \"\", [\"stream\"]", "lst.discard(self) notifier.user_to_user_stream.pop(self.user_id) def count_listeners(self) -> int: return len(self.notify_deferred.observers()) def new_listener(self,", "\"tokens\"))): def __bool__(self): return bool(self.events) @attr.s(slots=True, frozen=True) class _PendingRoomEventEntry: event_pos", "self.replication_callbacks: cb() def notify_remote_server_up(self, server: str): \"\"\"Notify any replication that", "try: user_stream.notify(stream_key, new_token, time_now_ms) except Exception: logger.exception(\"Failed to notify listener\")", "self.UNUSED_STREAM_EXPIRY_MS for stream in self.user_to_user_stream.values(): if stream.count_listeners(): continue if stream.last_notified_ms", "if name == \"room\": new_events = await filter_events_for_client( self.storage, user.to_string(),", "add_replication_callback(self, cb: Callable[[], None]): \"\"\"Add a callback that will be", "[] # type: List[EventBase] end_token = from_token for name, source", "that will be called when some new data is available.", "any replication listeners that there's a new event\"\"\" for cb", "new event\"\"\" for cb in self.replication_callbacks: cb() def notify_remote_server_up(self, server:", "room_streams = self.room_to_user_streams.setdefault(room_id, set()) room_streams.add(new_user_stream) new_user_stream.rooms.add(room_id) def notify_replication(self) -> None:", "# This is not a very cheap test to perform,", "\"waking_up_explicit_users\": len(users), \"waking_up_explicit_rooms\": len(rooms), } ) for user in users:", "application service. self._notify_app_services(max_room_stream_token) self._notify_pusher_pool(max_room_stream_token) if self.federation_sender: self.federation_sender.notify_new_events(max_room_stream_token) def _notify_app_services(self, max_room_stream_token:", "[ { \"type\": \"m.presence\", \"content\": format_user_presence_state(event, now), } for event", "# We want /events to be used for peeking independently", "room_id, EventTypes.RoomHistoryVisibility, \"\" ) if state and \"history_visibility\" in state.content:", "stream_key, stream_token, users or [] ) except Exception: logger.exception(\"Error notifying", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. # See the License for the", "= None, rooms: Optional[Collection[str]] = None, ): \"\"\"Used to inform", "class Notifier: \"\"\"This class is responsible for notifying any listeners", "before we have no idea whether they should be #", "= [] # type: List[EventBase] end_token = from_token for name,", "_get_room_ids( self, user: UserID, explicit_room_id: Optional[str] ) -> Tuple[Collection[str], bool]:", "source. Args: stream_key: The stream the event came from. stream_id:", "and b) it introduces # circular dependencies. if self.federation_sender: self.federation_sender.wake_destination(server)", "func(x): n += 1 return n class _NotificationListener: \"\"\"This represents", "has been updated. \"\"\" # poke any interested application service.", "def __init__(self, deferred): self.deferred = deferred class _NotifierUserStream: \"\"\"This represents", "rooms will be polled for events. 
If explicit_room_id is set,", "str): new_user_stream = self.user_to_user_stream.get(user_id) if new_user_stream is not None: room_streams", "expired_streams: expired_stream.remove(self) @log_function def _register_with_keys(self, user_stream: _NotifierUserStream): self.user_to_user_stream[user_stream.user_id] = user_stream", "hs: \"synapse.server.HomeServer\"): self.user_to_user_stream = {} # type: Dict[str, _NotifierUserStream] self.room_to_user_streams", "the most recent stream token for that user. At a", "await self._is_world_readable(explicit_room_id): return [explicit_room_id], False raise AuthError(403, \"Non-joined access not", "the Notifier class. \"\"\" def __init__( self, user_id: str, rooms:", "to the event stream. It tracks the most recent stream", "def _register_with_keys(self, user_stream: _NotifierUserStream): self.user_to_user_stream[user_stream.user_id] = user_stream for room in", "the current token since if we get any streams #", "them. If there are no new events wait for up", "def remove_expired_streams(self) -> None: time_now_ms = self.clock.time_msec() expired_streams = []", "there are new things to stream over replication self.replication_callbacks =", "[] users = set() # type: Set[UserID] rooms = set()", "is not None: room_streams = self.room_to_user_streams.setdefault(room_id, set()) room_streams.add(new_user_stream) new_user_stream.rooms.add(room_id) def", "for the stream the event came from. time_now_ms: The current", "events. If explicit_room_id is set, that room will be polled", "for events. If explicit_room_id is set, that room will be", "responsible for notifying any listeners when there are new events", "until the callback returns a non empty response or the", "deferred that is resolved when there is a new token", "user has joined the room. \"\"\" if pagination_config.from_token: from_token =", "event streams\"\"\" self.notify_replication() async def wait_for_events( self, user_id: str, timeout:", "server: str): \"\"\"Notify any replication that a remote server has", "already since happened # since their last token. if self.last_notified_token", "in list(self.room_to_user_streams.values()): all_user_streams |= streams for stream in list(self.user_to_user_stream.values()): all_user_streams.add(stream)", "extra_users = attr.ib(type=Collection[UserID]) room_id = attr.ib(type=str) type = attr.ib(type=str) state_key", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "lambda: count(bool, list(self.room_to_user_streams.values())), ) LaterGauge( \"synapse_notifier_users\", \"\", [], lambda: len(self.user_to_user_stream)", "Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, ): try: stream_token", "from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from synapse.streams.config", "notifying any listeners when there are new events available for", "but it's only executed # when rendering the metrics page,", "result = await callback(prev_token, current_token) return result async def get_events_for(", "something has happened event wise. Will wake up all listeners", "and rooms. 
\"\"\" users = users or [] rooms =", "is not None: user_streams.add(user_stream) for room in rooms: user_streams |=", "\"\"\"Used to inform replication listeners that something has happened without", "def wait_for_events( self, user_id: str, timeout: int, callback: Callable[[StreamToken, StreamToken],", "services that might care that the room position has been", "2016 OpenMarket Ltd # # Licensed under the Apache License,", "so to notify the handler it is sufficient to resolve", "notify for things that happened before this. \"\"\" # Immediately", "between the old prev_token and the current_token prev_token = current_token", "self.appservice_handler.notify_interested_services(max_room_stream_token) except Exception: logger.exception(\"Error notifying application services of event\") def", "notifier keeps an event stream per user_id. # This is", "logger = logging.getLogger(__name__) notified_events_counter = Counter(\"synapse_notifier_notified_events\", \"\") users_woken_by_stream_counter = Counter(", "self.last_notified_token != token: return _NotificationListener(defer.succeed(self.current_token)) else: return _NotificationListener(self.notify_deferred.observe()) class EventStreamResult(namedtuple(\"EventStreamResult\",", "result = await callback(prev_token, current_token) log_kv( { \"wait_for_events\": \"result\", \"result\":", "a callback that will be called when some new data", "the prev_token to the current_token since nothing # has happened", "is resolved when there is a new token greater than", "for cb in self.replication_callbacks: cb() def notify_remote_server_up(self, server: str): \"\"\"Notify", "started and wrapped with run_as_background_process. \"\"\" self.replication_callbacks.append(cb) def on_new_room_event( self,", "users = set() # type: Set[UserID] rooms = set() #", "care that the room position has been updated. \"\"\" #", "hs.get_state_handler() self.clock.looping_call( self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS ) # This is not a", "Optional[Collection[UserID]] = None, ): \"\"\"Unwraps event and calls `on_new_room_event_args`.\"\"\" self.on_new_room_event_args(", "for peeking independently of /sync, # without polluting its contents.", "self.appservice_handler = hs.get_application_service_handler() self._pusher_pool = hs.get_pusherpool() self.federation_sender = None if", "is_guest=is_peeking, room_ids=room_ids, explicit_room_id=explicit_room_id, ) if name == \"room\": new_events =", "listening to the room, and any listeners for the users", "now) / 1000.0, self.hs.get_reactor(), ) with start_active_span(\"wait_for_events.deferred\"): log_kv( { \"wait_for_events\":", "in rooms: user_streams |= self.room_to_user_streams.get(room, set()) time_now_ms = self.clock.time_msec() for", "highest stream_id below which all events have been persisted. \"\"\"", "I am sorry for what I have done. 
user_id_for_stream =", "PaginationConfig from synapse.types import ( Collection, PersistedEventPosition, RoomStreamToken, StreamToken, UserID,", "up any listeners that are listening to the room, and", "= getattr(after_token, keyname) if before_id == after_id: continue new_events, new_key", "= self.room_to_user_streams.setdefault(room_id, set()) room_streams.add(new_user_stream) new_user_stream.rooms.add(room_id) def notify_replication(self) -> None: \"\"\"Notify", "we wait for the _NotifierUserStream to be told there #", "- self.UNUSED_STREAM_EXPIRY_MS for stream in self.user_to_user_stream.values(): if stream.count_listeners(): continue if", "@log_function def _register_with_keys(self, user_stream: _NotifierUserStream): self.user_to_user_stream[user_stream.user_id] = user_stream for room", "token that comes before it. This gets updated every time", "for expired_stream in expired_streams: expired_stream.remove(self) @log_function def _register_with_keys(self, user_stream: _NotifierUserStream):", "to it and b) it introduces # circular dependencies. if", "): self.user_id = user_id self.rooms = set(rooms) self.current_token = current_token", ") return result async def _get_room_ids( self, user: UserID, explicit_room_id:", "= pagination_config.from_token else: from_token = self.event_sources.get_current_token() limit = pagination_config.limit room_ids,", ") ) self._notify_pending_new_room_events(max_room_stream_token) self.notify_replication() def _notify_pending_new_room_events(self, max_room_stream_token: RoomStreamToken): \"\"\"Notify for", "a # callback as a) we already have a reference", "up all listeners for the given users and rooms. \"\"\"", "they should be # woken up or not, so lets", "bool(self.events) @attr.s(slots=True, frozen=True) class _PendingRoomEventEntry: event_pos = attr.ib(type=PersistedEventPosition) extra_users =", "Primarily used from the /events stream. \"\"\" UNUSED_STREAM_EXPIRY_MS = 10", "from synapse.types import ( Collection, PersistedEventPosition, RoomStreamToken, StreamToken, UserID, )", "events have been persisted before notifying the client streams. \"\"\"", "use this file except in compliance with the License. #", "when there is a new token greater than the given", "given users and rooms. \"\"\" users = users or []", "is world readable or the user has joined the room.", "for stream in list(self.user_to_user_stream.values()): all_user_streams.add(stream) return sum(stream.count_listeners() for stream in", "hs.get_storage() self.event_sources = hs.get_event_sources() self.store = hs.get_datastore() self.pending_new_room_events = []", "\"listeners\": self.count_listeners(), } ) users_woken_by_stream_counter.labels(stream_key).inc() with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred())", "Callable[[], None]): \"\"\"Add a callback that will be called when", "new_events = await filter_events_for_client( self.storage, user.to_string(), new_events, is_peeking=is_peeking, ) elif", "user.to_string(), new_events, is_peeking=is_peeking, ) elif name == \"presence\": now =", "have been persisted. 
\"\"\" pending = self.pending_new_room_events self.pending_new_room_events = []", "= 0 for x in it: if func(x): n +=", "Tuple, TypeVar, Union, ) import attr from prometheus_client import Counter", "self.user_to_user_stream.get(user_id) if new_user_stream is not None: room_streams = self.room_to_user_streams.setdefault(room_id, set())", "woken up or not, so lets just wake them up.", "{ \"wait_for_events\": \"result\", \"result\": bool(result), } ) if result: break", "have a # token that comes before it. This gets", "for name, source in self.event_sources.sources.items(): keyname = \"%s_key\" % name", "\"\"\"For the given user and rooms, return any new events", "the room position has been updated. \"\"\" # poke any", "= hs.get_datastore() self.pending_new_room_events = [] # type: List[_PendingRoomEventEntry] # Called", "hs.get_pusherpool() self.federation_sender = None if hs.should_send_federation(): self.federation_sender = hs.get_federation_sender() self.state_handler", "Tuple[Collection[str], bool]: joined_room_ids = await self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if explicit_room_id", "= attr.ib(type=str) state_key = attr.ib(type=Optional[str]) membership = attr.ib(type=Optional[str]) class Notifier:", "rooms = set() # type: Set[str] for entry in pending:", "registering as a # callback as a) we already have", "is_guest: bool = False, explicit_room_id: Optional[str] = None, ) ->", "return ( state.content[\"history_visibility\"] == HistoryVisibility.WORLD_READABLE ) else: return False @log_function", "user's joined rooms will be polled for events. If explicit_room_id", "\"%s_key\" % name before_id = getattr(before_token, keyname) after_id = getattr(after_token,", "users: Optional[Collection[Union[str, UserID]]] = None, ): try: stream_token = None", "EventStreamResult: \"\"\"For the given user and rooms, return any new", "10 * 60 * 1000 def __init__(self, hs: \"synapse.server.HomeServer\"): self.user_to_user_stream", "else: return _NotificationListener(self.notify_deferred.observe()) class EventStreamResult(namedtuple(\"EventStreamResult\", (\"events\", \"tokens\"))): def __bool__(self): return", "is responsible for notifying any listeners when there are new", "\"history_visibility\" in state.content: return ( state.content[\"history_visibility\"] == HistoryVisibility.WORLD_READABLE ) else:", "in compliance with the License. # You may obtain a", "to the deferred, so to notify the handler it is", "membership=membership, ) ) self._notify_pending_new_room_events(max_room_stream_token) self.notify_replication() def _notify_pending_new_room_events(self, max_room_stream_token: RoomStreamToken): \"\"\"Notify", "software # distributed under the License is distributed on an", "up any streams that have a # token that comes", "int, is_guest: bool = False, explicit_room_id: Optional[str] = None, )", "= new_token self.appservice_handler.notify_interested_services_ephemeral( stream_key, stream_token, users or [] ) except", "until all previous events have been persisted before notifying the", "def on_new_room_event( self, event: EventBase, event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users:", "polled for events. 
If explicit_room_id is set, that room will", "== HistoryVisibility.WORLD_READABLE ) else: return False @log_function def remove_expired_streams(self) ->", "Exception: logger.exception(\"Error notifying application services of event\") def _notify_pusher_pool(self, max_room_stream_token:", "will have yielded to the deferred, so to notify the", "has already since happened # since their last token. if", "): try: stream_token = None if isinstance(new_token, int): stream_token =", "UserID]]] = None, rooms: Optional[Collection[str]] = None, ): \"\"\"Used to", "# token that comes before it. This gets updated every", "len(self.notify_deferred.observers()) def new_listener(self, token: StreamToken) -> _NotificationListener: \"\"\"Returns a deferred", "as a # callback as a) we already have a", "token since if we get any streams # that have", "new_events[:] = [ { \"type\": \"m.presence\", \"content\": format_user_presence_state(event, now), }", "self.count_listeners(), } ) users_woken_by_stream_counter.labels(stream_key).inc() with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) noify_deferred.callback(self.current_token)", "The last token for which we should wake up any", "somewhere def count(func: Callable[[T], bool], it: Iterable[T]) -> int: \"\"\"Return", "event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] = None, ): \"\"\"Unwraps", "waking up any of the normal user event streams\"\"\" self.notify_replication()", "that have a # token that comes before it. This", "self.state_handler = hs.get_state_handler() self.clock.looping_call( self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS ) # This is", "max_room_stream_token, users=users, rooms=rooms, ) self._on_updated_room_token(max_room_stream_token) def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken): \"\"\"Poke", "if is_peeking: # Internally, the notifier keeps an event stream", "The stream the event came from. stream_id: The new id", "self._user_joined_room(entry.state_key, entry.room_id) users.update(entry.extra_users) rooms.add(entry.room_id) if users or rooms: self.on_new_event( \"room_key\",", "= await self._get_room_ids(user, explicit_room_id) is_peeking = not is_joined async def", "new_user_stream = self.user_to_user_stream.get(user_id) if new_user_stream is not None: room_streams =", "for things that happened before this. \"\"\" # Immediately wake", "synapse.types import ( Collection, PersistedEventPosition, RoomStreamToken, StreamToken, UserID, ) from", "all_user_streams |= streams for stream in list(self.user_to_user_stream.values()): all_user_streams.add(stream) return sum(stream.count_listeners()", "in it: if func(x): n += 1 return n class", "synapse.handlers.presence import format_user_presence_state from synapse.logging.context import PreserveLoggingContext from synapse.logging.opentracing import", "extra_users: Optional[Collection[UserID]] = None, ): \"\"\"Used by handlers to inform", "import LaterGauge from synapse.streams.config import PaginationConfig from synapse.types import (", "= attr.ib(type=str) type = attr.ib(type=str) state_key = attr.ib(type=Optional[str]) membership =", "that room will be polled for events only if it", "def _is_world_readable(self, room_id: str) -> bool: state = await self.state_handler.get_current_state(", "from all the indexes in the Notifier it knows about.", "a reference to it and b) it introduces # circular", "stream_id below which all events have been persisted. 
\"\"\" pending", "type: List[Callable[[str], None]] self.clock = hs.get_clock() self.appservice_handler = hs.get_application_service_handler() self._pusher_pool", "\"\"\"Used to inform listeners that something has happened event wise.", "ObservableDeferred, timeout_deferred from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client", "come back up\"\"\" # We call federation_sender directly rather than", "self.event_sources.get_current_token() limit = pagination_config.limit room_ids, is_joined = await self._get_room_ids(user, explicit_room_id)", "type: Dict[str, Set[_NotifierUserStream]] self.hs = hs self.storage = hs.get_storage() self.event_sources", "timeout had # already expired. current_token = user_stream.current_token result =", "set, the user's joined rooms will be polled for events.", "user_stream for room in user_stream.rooms: s = self.room_to_user_streams.setdefault(room, set()) s.add(user_stream)", ") if result: break # Update the prev_token to the", "[explicit_room_id], True if await self._is_world_readable(explicit_room_id): return [explicit_room_id], False raise AuthError(403,", "handler it is sufficient to resolve the deferred. \"\"\" __slots__", "new_events, new_key = await source.get_new_events( user=user, from_key=getattr(from_token, keyname), limit=limit, is_guest=is_peeking,", "with the License. # You may obtain a copy of", "= None, ): try: stream_token = None if isinstance(new_token, int):", "things to stream over replication self.replication_callbacks = [] # type:", "return [explicit_room_id], False raise AuthError(403, \"Non-joined access not allowed\") return", "pending = self.pending_new_room_events self.pending_new_room_events = [] users = set() #", "notifying application services of event\") def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken): try:", "user in users: user_stream = self.user_to_user_stream.get(str(user)) if user_stream is not", "id for the stream the event came from. time_now_ms: The", "if result is None: # This happened if there was", "bool], it: Iterable[T]) -> int: \"\"\"Return the number of items", "for event in new_events ] events.extend(new_events) end_token = end_token.copy_and_replace(keyname, new_key)", "events stream handler will have yielded to the deferred, so", "try: now = self.clock.time_msec() if end_time <= now: break #", "have been persisted before notifying the client streams. \"\"\" self.pending_new_room_events.append(", "None, rooms: Optional[Collection[str]] = None, ): \"\"\"Used to inform listeners", "== before_token: return EventStreamResult([], (from_token, from_token)) events = [] #", "told there # is a new token. listener = user_stream.new_listener(prev_token)", "== \"room\": new_events = await filter_events_for_client( self.storage, user.to_string(), new_events, is_peeking=is_peeking,", "events that were queued waiting for a previous event to", "from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client logger =", "end_token = from_token for name, source in self.event_sources.sources.items(): keyname =", "express or implied. # See the License for the specific", "rooms=room_ids, current_token=current_token, time_now_ms=self.clock.time_msec(), ) self._register_with_keys(user_stream) result = None prev_token =", "except in compliance with the License. 
# You may obtain", "collections import namedtuple from typing import ( Awaitable, Callable, Dict,", "= time_now_ms with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) def notify( self,", "= await self.store.get_rooms_for_user(user_id) user_stream = _NotifierUserStream( user_id=user_id, rooms=room_ids, current_token=current_token, time_now_ms=self.clock.time_msec(),", "= self.pending_new_room_events self.pending_new_room_events = [] users = set() # type:", "attr.ib(type=PersistedEventPosition) extra_users = attr.ib(type=Collection[UserID]) room_id = attr.ib(type=str) type = attr.ib(type=str)", "await callback(prev_token, current_token) return result async def get_events_for( self, user:", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "deferred class _NotifierUserStream: \"\"\"This represents a user connected to the", "is None: # This happened if there was no timeout", "rooms: self.on_new_event( \"room_key\", max_room_stream_token, users=users, rooms=rooms, ) self._on_updated_room_token(max_room_stream_token) def _on_updated_room_token(self,", "new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, rooms: Optional[Collection[str]]", "stream handler will have yielded to the deferred, so to", "called when some new data is available. Callback is not", "Will wake up all listeners for the given users and", "stream. \"\"\" UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000 def", "users or [] ) except Exception: logger.exception(\"Error notifying application services", "we should wake up any streams that have a #", "in joined_room_ids: return [explicit_room_id], True if await self._is_world_readable(explicit_room_id): return [explicit_room_id],", "Args: stream_key: The stream the event came from. stream_id: The", "there was no timeout or if the timeout had #", "already expired. current_token = user_stream.current_token result = await callback(prev_token, current_token)", "the room, and any listeners for the users in the", "CONDITIONS OF ANY KIND, either express or implied. # See", "\"result\", \"result\": bool(result), } ) if result: break # Update", "return a Deferred - if it needs to do any", "Ltd # # Licensed under the Apache License, Version 2.0", "__init__( self, user_id: str, rooms: Collection[str], current_token: StreamToken, time_now_ms: int,", "after_token: StreamToken ) -> EventStreamResult: if after_token == before_token: return", "= hs.get_storage() self.event_sources = hs.get_event_sources() self.store = hs.get_datastore() self.pending_new_room_events =", "from. time_now_ms: The current time in milliseconds. \"\"\" self.current_token =", "= self.user_to_user_stream.get(str(user)) if user_stream is not None: user_streams.add(user_stream) for room", "return EventStreamResult([], (from_token, from_token)) events = [] # type: List[EventBase]", "str, room_id: str): new_user_stream = self.user_to_user_stream.get(user_id) if new_user_stream is not", "stream_token = None if isinstance(new_token, int): stream_token = new_token self.appservice_handler.notify_interested_services_ephemeral(", "in user_streams: try: user_stream.notify(stream_key, new_token, time_now_ms) except Exception: logger.exception(\"Failed to", "tracks the most recent stream token for that user. At", "UserID, pagination_config: PaginationConfig, timeout: int, is_guest: bool = False, explicit_room_id:", "for events. 
This listener will also keep track of which", "check_for_updates( before_token: StreamToken, after_token: StreamToken ) -> EventStreamResult: if after_token", "n = 0 for x in it: if func(x): n", "membership = attr.ib(type=Optional[str]) class Notifier: \"\"\"This class is responsible for", "-*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket", "point a user may have a number of streams listening", "if state and \"history_visibility\" in state.content: return ( state.content[\"history_visibility\"] ==", "RoomStreamToken, extra_users: Optional[Collection[UserID]] = None, ): \"\"\"Unwraps event and calls", "list(self.room_to_user_streams.values())), ) LaterGauge( \"synapse_notifier_users\", \"\", [], lambda: len(self.user_to_user_stream) ) def", "stream in list(self.user_to_user_stream.values()): all_user_streams.add(stream) return sum(stream.count_listeners() for stream in all_user_streams)", "EventTypes.Member and entry.membership == Membership.JOIN and entry.state_key ): self._user_joined_room(entry.state_key, entry.room_id)", "end_token = end_token.copy_and_replace(keyname, new_key) return EventStreamResult(events, (from_token, end_token)) user_id_for_stream =", "user event streams\"\"\" self.notify_replication() async def wait_for_events( self, user_id: str,", "PersistedEventPosition, RoomStreamToken, StreamToken, UserID, ) from synapse.util.async_helpers import ObservableDeferred, timeout_deferred", "new_listener(self, token: StreamToken) -> _NotificationListener: \"\"\"Returns a deferred that is", "\"\"\" pending = self.pending_new_room_events self.pending_new_room_events = [] users = set()", "needs to do any asynchronous work, a background thread should", "= time_now_ms noify_deferred = self.notify_deferred log_kv( { \"notify\": self.user_id, \"stream\":", "frozen=True) class _PendingRoomEventEntry: event_pos = attr.ib(type=PersistedEventPosition) extra_users = attr.ib(type=Collection[UserID]) room_id", "now), } for event in new_events ] events.extend(new_events) end_token =", "data is available. Callback is not given any arguments. It", "some new data is available. Callback is not given any", "current_token = self.event_sources.get_current_token() if room_ids is None: room_ids = await", "with Measure(self.clock, \"on_new_event\"): user_streams = set() log_kv( { \"waking_up_explicit_users\": len(users),", "shared somewhere def count(func: Callable[[T], bool], it: Iterable[T]) -> int:", "set()) time_now_ms = self.clock.time_msec() for user_stream in user_streams: try: user_stream.notify(stream_key,", "we have no idea whether they should be # woken", "rooms it is listening in so that it can remove", "TypeVar(\"T\") # TODO(paul): Should be shared somewhere def count(func: Callable[[T],", "to inform the notifier something has happened in the room,", "metrics page, which is likely once per minute at #", "for them. If there are no new events wait for", "bool = False, explicit_room_id: Optional[str] = None, ) -> EventStreamResult:", "-> EventStreamResult: \"\"\"For the given user and rooms, return any", "any listeners that are listening to the room, and any", "rooms=rooms, ) self._on_updated_room_token(max_room_stream_token) def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken): \"\"\"Poke services that", "from_token = self.event_sources.get_current_token() limit = pagination_config.limit room_ids, is_joined = await", "in the Notifier it knows about. 
\"\"\" for room in", "token: The token from which we are streaming from, i.e.", "if self.last_notified_token != token: return _NotificationListener(defer.succeed(self.current_token)) else: return _NotificationListener(self.notify_deferred.observe()) class", "in all_user_streams) LaterGauge(\"synapse_notifier_listeners\", \"\", [], count_listeners) LaterGauge( \"synapse_notifier_rooms\", \"\", [],", "the _NotifierUserStream to be told there # is a new", "str, stream_id: Union[int, RoomStreamToken], time_now_ms: int, ): \"\"\"Notify any listeners", "\"\") users_woken_by_stream_counter = Counter( \"synapse_notifier_users_woken_by_stream\", \"\", [\"stream\"] ) T =", "stream.last_notified_ms < expire_before_ts: expired_streams.append(stream) for expired_stream in expired_streams: expired_stream.remove(self) @log_function", "its contents. So we invent an illegal user ID #", "[explicit_room_id], False raise AuthError(403, \"Non-joined access not allowed\") return joined_room_ids,", "any listeners when there are new events available for it.", "and wrapped with run_as_background_process. \"\"\" self.replication_callbacks.append(cb) def on_new_room_event( self, event:", "# Called when remote servers have come back online after", "previous events have been persisted before notifying the client streams.", "if ( entry.type == EventTypes.Member and entry.membership == Membership.JOIN and", "user_stream is None: current_token = self.event_sources.get_current_token() if room_ids is None:", "/sync and /events. # We want /events to be used", "def notify_remote_server_up(self, server: str): \"\"\"Notify any replication that a remote", "twisted.internet import defer import synapse.server from synapse.api.constants import EventTypes, HistoryVisibility,", "[] rooms = rooms or [] with Measure(self.clock, \"on_new_event\"): user_streams", "raise AuthError(403, \"Non-joined access not allowed\") return joined_room_ids, True async", "max_room_stream_token: RoomStreamToken): try: self.appservice_handler.notify_interested_services(max_room_stream_token) except Exception: logger.exception(\"Error notifying application services", "self.wait_for_events( user_id_for_stream, timeout, check_for_updates, room_ids=room_ids, from_token=from_token, ) return result async", "a remote server has come back up\"\"\" # We call", "room_id: str, event_type: str, state_key: Optional[str], membership: Optional[str], event_pos: PersistedEventPosition,", "token: return _NotificationListener(defer.succeed(self.current_token)) else: return _NotificationListener(self.notify_deferred.observe()) class EventStreamResult(namedtuple(\"EventStreamResult\", (\"events\", \"tokens\"))):", "call federation_sender directly rather than registering as a # callback", "cb() def notify_remote_server_up(self, server: str): \"\"\"Notify any replication that a", "done. user_id_for_stream = \"_PEEKING_%s_%s\" % ( explicit_room_id, user_id_for_stream, ) result", "which all events have been persisted. \"\"\" pending = self.pending_new_room_events", "stream_id, \"listeners\": self.count_listeners(), } ) users_woken_by_stream_counter.labels(stream_key).inc() with PreserveLoggingContext(): self.notify_deferred =", "and the current_token prev_token = current_token except defer.TimeoutError: log_kv({\"wait_for_events\": \"timeout\"})", "new events available for it. 
Primarily used from the /events", "should wake up any streams that have a # token", "if we get any streams # that have a token", "hs.get_datastore() self.pending_new_room_events = [] # type: List[_PendingRoomEventEntry] # Called when", "def on_new_room_event_args( self, room_id: str, event_type: str, state_key: Optional[str], membership:", "-> Tuple[Collection[str], bool]: joined_room_ids = await self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if", "it can remove itself from the indexes in the Notifier", "client streams. \"\"\" self.pending_new_room_events.append( _PendingRoomEventEntry( event_pos=event_pos, extra_users=extra_users or [], room_id=room_id,", "if user_stream is not None: user_streams.add(user_stream) for room in rooms:", "events have been persisted. \"\"\" pending = self.pending_new_room_events self.pending_new_room_events =", "AuthError from synapse.events import EventBase from synapse.handlers.presence import format_user_presence_state from", "the current_token prev_token = current_token except defer.TimeoutError: log_kv({\"wait_for_events\": \"timeout\"}) break", "had # already expired. current_token = user_stream.current_token result = await", "for the users in the `extra_users` param. The events can", "def count_listeners(): all_user_streams = set() # type: Set[_NotifierUserStream] for streams", "attr.ib(type=Optional[str]) membership = attr.ib(type=Optional[str]) class Notifier: \"\"\"This class is responsible", "all_user_streams = set() # type: Set[_NotifierUserStream] for streams in list(self.room_to_user_streams.values()):", "result async def _get_room_ids( self, user: UserID, explicit_room_id: Optional[str] )", "(end_time - now) / 1000.0, self.hs.get_reactor(), ) with start_active_span(\"wait_for_events.deferred\"): log_kv(", "= attr.ib(type=Collection[UserID]) room_id = attr.ib(type=str) type = attr.ib(type=str) state_key =", "None: room_streams = self.room_to_user_streams.setdefault(room_id, set()) room_streams.add(new_user_stream) new_user_stream.rooms.add(room_id) def notify_replication(self) ->", "param. The events can be peristed out of order. The", "users_woken_by_stream_counter.labels(stream_key).inc() with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) noify_deferred.callback(self.current_token) def remove(self, notifier:", "# over /events. # # I am sorry for what", "_NotificationListener: \"\"\"This represents a single client connection to the events", "None, ): \"\"\"Used by handlers to inform the notifier something", "RoomStreamToken): \"\"\"Poke services that might care that the room position", "just wake them up. self.last_notified_token = current_token self.last_notified_ms = time_now_ms", "await self.state_handler.get_current_state( room_id, EventTypes.RoomHistoryVisibility, \"\" ) if state and \"history_visibility\"", "not, so lets just wake them up. self.last_notified_token = current_token", "): \"\"\"Unwraps event and calls `on_new_room_event_args`.\"\"\" self.on_new_room_event_args( event_pos=event_pos, room_id=event.room_id, event_type=event.type,", "*not* return a Deferred - if it needs to do", "wrapped with run_as_background_process. 
\"\"\" self.replication_callbacks.append(cb) def on_new_room_event( self, event: EventBase,", "Notify appservices self._notify_app_services_ephemeral( stream_key, new_token, users, ) def on_new_replication_data(self) ->", "timeout, check_for_updates, room_ids=room_ids, from_token=from_token, ) return result async def _get_room_ids(", "# type: List[_PendingRoomEventEntry] # Called when there are new things", "async def check_for_updates( before_token: StreamToken, after_token: StreamToken ) -> EventStreamResult:", "and entry.state_key ): self._user_joined_room(entry.state_key, entry.room_id) users.update(entry.extra_users) rooms.add(entry.room_id) if users or", "notifier to wake up any listeners that are listening to", "str, state_key: Optional[str], membership: Optional[str], event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users:", "indexes in the Notifier it knows about. \"\"\" for room", "self.clock.time_msec() if end_time <= now: break # Now we wait", "continue if stream.last_notified_ms < expire_before_ts: expired_streams.append(stream) for expired_stream in expired_streams:", "back up\"\"\" # We call federation_sender directly rather than registering", "= False, explicit_room_id: Optional[str] = None, ) -> EventStreamResult: \"\"\"For", "a) we already have a reference to it and b)", "a token from before we have no idea whether they", "time_now_ms - self.UNUSED_STREAM_EXPIRY_MS for stream in self.user_to_user_stream.values(): if stream.count_listeners(): continue", "any asynchronous work, a background thread should be started and", "None: user_streams.add(user_stream) for room in rooms: user_streams |= self.room_to_user_streams.get(room, set())", "in so that it can remove itself from the indexes", "self.current_token = self.current_token.copy_and_advance(stream_key, stream_id) self.last_notified_token = self.current_token self.last_notified_ms = time_now_ms", "listeners that are listening to the room, and any listeners", "getattr(before_token, keyname) after_id = getattr(after_token, keyname) if before_id == after_id:", "self._pusher_pool = hs.get_pusherpool() self.federation_sender = None if hs.should_send_federation(): self.federation_sender =", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "with start_active_span(\"wait_for_events.deferred\"): log_kv( { \"wait_for_events\": \"sleep\", \"token\": prev_token, } )", "StreamToken, after_token: StreamToken ) -> EventStreamResult: if after_token == before_token:", "elif name == \"presence\": now = self.clock.time_msec() new_events[:] = [", "been persisted before notifying the client streams. \"\"\" self.pending_new_room_events.append( _PendingRoomEventEntry(", "\"cancelled\"}) break if result is None: # This happened if", "itself from the indexes in the Notifier class. 
\"\"\" def", ") LaterGauge( \"synapse_notifier_users\", \"\", [], lambda: len(self.user_to_user_stream) ) def add_replication_callback(self,", "user may have a number of streams listening for events.", "timeout_deferred from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client logger", "self.user_id, \"stream\": stream_key, \"stream_id\": stream_id, \"listeners\": self.count_listeners(), } ) users_woken_by_stream_counter.labels(stream_key).inc()", "self._notify_pending_new_room_events(max_room_stream_token) self.notify_replication() def _notify_pending_new_room_events(self, max_room_stream_token: RoomStreamToken): \"\"\"Notify for the room", "event_pos=event_pos, room_id=event.room_id, event_type=event.type, state_key=event.get(\"state_key\"), membership=event.content.get(\"membership\"), max_room_stream_token=max_room_stream_token, extra_users=extra_users or [], )", "for streams in list(self.room_to_user_streams.values()): all_user_streams |= streams for stream in", "[], lambda: len(self.user_to_user_stream) ) def add_replication_callback(self, cb: Callable[[], None]): \"\"\"Add", "str) -> bool: state = await self.state_handler.get_current_state( room_id, EventTypes.RoomHistoryVisibility, \"\"", "def add_replication_callback(self, cb: Callable[[], None]): \"\"\"Add a callback that will", "self, user: UserID, pagination_config: PaginationConfig, timeout: int, is_guest: bool =", "True if await self._is_world_readable(explicit_room_id): return [explicit_room_id], False raise AuthError(403, \"Non-joined", "self.federation_sender: self.federation_sender.notify_new_events(max_room_stream_token) def _notify_app_services(self, max_room_stream_token: RoomStreamToken): try: self.appservice_handler.notify_interested_services(max_room_stream_token) except Exception:", "format_user_presence_state(event, now), } for event in new_events ] events.extend(new_events) end_token", "happened between the old prev_token and the current_token prev_token =", "has happened in the room, room event wise. This triggers", "services of event\") def _notify_app_services_ephemeral( self, stream_key: str, new_token: Union[int,", "something has happened in the room, room event wise. This", "Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under", "Version 2.0 (the \"License\"); # you may not use this", "remote servers have come back online after having been #", "self.hs.get_reactor(), ) with start_active_span(\"wait_for_events.deferred\"): log_kv( { \"wait_for_events\": \"sleep\", \"token\": prev_token,", "it. def count_listeners(): all_user_streams = set() # type: Set[_NotifierUserStream] for", "LaterGauge from synapse.streams.config import PaginationConfig from synapse.types import ( Collection,", "from synapse.streams.config import PaginationConfig from synapse.types import ( Collection, PersistedEventPosition,", "EventBase from synapse.handlers.presence import format_user_presence_state from synapse.logging.context import PreserveLoggingContext from", "for up to `timeout` milliseconds for any new events to", "# Update the prev_token to the current_token since nothing #", "listeners for this user of a new event from an", "return result async def _get_room_ids( self, user: UserID, explicit_room_id: Optional[str]", "n class _NotificationListener: \"\"\"This represents a single client connection to", "hs.get_clock() self.appservice_handler = hs.get_application_service_handler() self._pusher_pool = hs.get_pusherpool() self.federation_sender = None", "been updated. 
\"\"\" # poke any interested application service. self._notify_app_services(max_room_stream_token)", "\"\"\" if pagination_config.from_token: from_token = pagination_config.from_token else: from_token = self.event_sources.get_current_token()", "coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd", "run_as_background_process. \"\"\" self.replication_callbacks.append(cb) def on_new_room_event( self, event: EventBase, event_pos: PersistedEventPosition,", "return _NotificationListener(defer.succeed(self.current_token)) else: return _NotificationListener(self.notify_deferred.observe()) class EventStreamResult(namedtuple(\"EventStreamResult\", (\"events\", \"tokens\"))): def", "by applicable law or agreed to in writing, software #", "listeners when there are new events available for it. Primarily", "\"wait_for_events\": \"woken\", \"token\": user_stream.current_token, } ) current_token = user_stream.current_token result", "log_kv({\"wait_for_events\": \"cancelled\"}) break if result is None: # This happened", "readable or the user has joined the room. \"\"\" if", "explicit_room_id: Optional[str] = None, ) -> EventStreamResult: \"\"\"For the given", "it is listening in so that it can remove itself", "membership: Optional[str], event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] = None,", "keeps an event stream per user_id. # This is used", "\"stream_id\": stream_id, \"listeners\": self.count_listeners(), } ) users_woken_by_stream_counter.labels(stream_key).inc() with PreserveLoggingContext(): self.notify_deferred", "= deferred class _NotifierUserStream: \"\"\"This represents a user connected to", "from, i.e. we shouldn't notify for things that happened before", "notifier.room_to_user_streams.get(room, set()) lst.discard(self) notifier.user_to_user_stream.pop(self.user_id) def count_listeners(self) -> int: return len(self.notify_deferred.observers())", "state = await self.state_handler.get_current_state( room_id, EventTypes.RoomHistoryVisibility, \"\" ) if state", "from. stream_id: The new id for the stream the event", "new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, ): try:", "room_ids = await self.store.get_rooms_for_user(user_id) user_stream = _NotifierUserStream( user_id=user_id, rooms=room_ids, current_token=current_token,", "Iterable[T]) -> int: \"\"\"Return the number of items in it", "# Internally, the notifier keeps an event stream per user_id.", "a new token greater than the given token. Args: token:", "{ \"waking_up_explicit_users\": len(users), \"waking_up_explicit_rooms\": len(rooms), } ) for user in", "token. if self.last_notified_token != token: return _NotificationListener(defer.succeed(self.current_token)) else: return _NotificationListener(self.notify_deferred.observe())", "T: \"\"\"Wait until the callback returns a non empty response", "/sync, # without polluting its contents. So we invent an", "new_key) return EventStreamResult(events, (from_token, end_token)) user_id_for_stream = user.to_string() if is_peeking:", "over /events. # # I am sorry for what I", "timeout fires. \"\"\" user_stream = self.user_to_user_stream.get(user_id) if user_stream is None:", "streaming from, i.e. 
we shouldn't notify for things that happened", "inform the notifier something has happened in the room, room", "applicable law or agreed to in writing, software # distributed", "= self.clock.time_msec() expired_streams = [] expire_before_ts = time_now_ms - self.UNUSED_STREAM_EXPIRY_MS", "stream if something has already since happened # since their", "int: \"\"\"Return the number of items in it for which", "_NotifierUserStream to be told there # is a new token.", "do any asynchronous work, a background thread should be started", "that something has happened without waking up any of the", "= hs.get_federation_sender() self.state_handler = hs.get_state_handler() self.clock.looping_call( self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS ) #", "the user's joined rooms will be polled for events. If", "room in user_stream.rooms: s = self.room_to_user_streams.setdefault(room, set()) s.add(user_stream) def _user_joined_room(self,", "have a reference to it and b) it introduces #", "\"woken\", \"token\": user_stream.current_token, } ) current_token = user_stream.current_token result =", "user_streams: try: user_stream.notify(stream_key, new_token, time_now_ms) except Exception: logger.exception(\"Failed to notify", "await self._get_room_ids(user, explicit_room_id) is_peeking = not is_joined async def check_for_updates(", "synapse.visibility import filter_events_for_client logger = logging.getLogger(__name__) notified_events_counter = Counter(\"synapse_notifier_notified_events\", \"\")", "limitations under the License. import logging from collections import namedtuple", "cb: Callable[[], None]): \"\"\"Add a callback that will be called", "persisted. Args: max_room_stream_token: The highest stream_id below which all events", "return joined_room_ids, True async def _is_world_readable(self, room_id: str) -> bool:", "new events wait for up to `timeout` milliseconds for any", "= attr.ib(type=Optional[str]) membership = attr.ib(type=Optional[str]) class Notifier: \"\"\"This class is", "import Measure from synapse.visibility import filter_events_for_client logger = logging.getLogger(__name__) notified_events_counter", "rendering the metrics page, which is likely once per minute", "from synapse.api.errors import AuthError from synapse.events import EventBase from synapse.handlers.presence", "be started and wrapped with run_as_background_process. \"\"\" self.replication_callbacks.append(cb) def on_new_room_event(", "user_stream.new_listener(prev_token) listener.deferred = timeout_deferred( listener.deferred, (end_time - now) / 1000.0,", "RoomStreamToken], time_now_ms: int, ): \"\"\"Notify any listeners for this user", "in expired_streams: expired_stream.remove(self) @log_function def _register_with_keys(self, user_stream: _NotifierUserStream): self.user_to_user_stream[user_stream.user_id] =", "most recent stream token for that user. At a given", "event wise. This triggers the notifier to wake up any", "# You may obtain a copy of the License at", "Counter( \"synapse_notifier_users_woken_by_stream\", \"\", [\"stream\"] ) T = TypeVar(\"T\") # TODO(paul):", "when remote servers have come back online after having been", "sorry for what I have done. user_id_for_stream = \"_PEEKING_%s_%s\" %", "listeners for the users in the `extra_users` param. 
The events", "synapse.util.async_helpers import ObservableDeferred, timeout_deferred from synapse.util.metrics import Measure from synapse.visibility", "Union[int, RoomStreamToken], time_now_ms: int, ): \"\"\"Notify any listeners for this", "get any streams # that have a token from before", "synapse.server from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.api.errors import", "Args: max_room_stream_token: The highest stream_id below which all events have", "= self.clock.time_msec() for user_stream in user_streams: try: user_stream.notify(stream_key, new_token, time_now_ms)", "will also keep track of which rooms it is listening", "page, which is likely once per minute at # most", "be shared somewhere def count(func: Callable[[T], bool], it: Iterable[T]) ->", "except Exception: logger.exception(\"Error notifying application services of event\") def _notify_app_services_ephemeral(", "result: break # Update the prev_token to the current_token since", "= self.room_to_user_streams.setdefault(room, set()) s.add(user_stream) def _user_joined_room(self, user_id: str, room_id: str):", "time_now_ms: int, ): \"\"\"Notify any listeners for this user of", "not is_joined async def check_for_updates( before_token: StreamToken, after_token: StreamToken )", "EventStreamResult: if after_token == before_token: return EventStreamResult([], (from_token, from_token)) events", ") except Exception: logger.exception(\"Error notifying application services of event\") def", "if entry.event_pos.persisted_after(max_room_stream_token): self.pending_new_room_events.append(entry) else: if ( entry.type == EventTypes.Member and", "( Collection, PersistedEventPosition, RoomStreamToken, StreamToken, UserID, ) from synapse.util.async_helpers import", "\"\" ) if state and \"history_visibility\" in state.content: return (", "of order. The notifier will wait until all previous events", "remove itself from the indexes in the Notifier class. \"\"\"", "Measure from synapse.visibility import filter_events_for_client logger = logging.getLogger(__name__) notified_events_counter =", "a single client connection to the events stream. The events", "user_id=user_id, rooms=room_ids, current_token=current_token, time_now_ms=self.clock.time_msec(), ) self._register_with_keys(user_stream) result = None prev_token", "UserID]]] = None, ): try: stream_token = None if isinstance(new_token,", "the Notifier it knows about. \"\"\" for room in self.rooms:", "self.room_to_user_streams = {} # type: Dict[str, Set[_NotifierUserStream]] self.hs = hs", "a previous event to be persisted. Args: max_room_stream_token: The highest", "# This happened if there was no timeout or if", "return _NotificationListener(self.notify_deferred.observe()) class EventStreamResult(namedtuple(\"EventStreamResult\", (\"events\", \"tokens\"))): def __bool__(self): return bool(self.events)", "limit=limit, is_guest=is_peeking, room_ids=room_ids, explicit_room_id=explicit_room_id, ) if name == \"room\": new_events", "stream_token, users or [] ) except Exception: logger.exception(\"Error notifying application", "of which rooms it is listening in so that it", "to happen before returning. 
If explicit_room_id is not set, the", "user_stream.current_token result = await callback(prev_token, current_token) log_kv( { \"wait_for_events\": \"result\",", "\"content\": format_user_presence_state(event, now), } for event in new_events ] events.extend(new_events)", "async def get_events_for( self, user: UserID, pagination_config: PaginationConfig, timeout: int,", "this user of a new event from an event source.", "users or rooms: self.on_new_event( \"room_key\", max_room_stream_token, users=users, rooms=rooms, ) self._on_updated_room_token(max_room_stream_token)", "explicit_room_id) is_peeking = not is_joined async def check_for_updates( before_token: StreamToken,", "prev_token and the current_token prev_token = current_token except defer.TimeoutError: log_kv({\"wait_for_events\":", "from synapse.visibility import filter_events_for_client logger = logging.getLogger(__name__) notified_events_counter = Counter(\"synapse_notifier_notified_events\",", "for room in self.rooms: lst = notifier.room_to_user_streams.get(room, set()) lst.discard(self) notifier.user_to_user_stream.pop(self.user_id)", "\"License\"); # you may not use this file except in", "pending: if entry.event_pos.persisted_after(max_room_stream_token): self.pending_new_room_events.append(entry) else: if ( entry.type == EventTypes.Member", "users.update(entry.extra_users) rooms.add(entry.room_id) if users or rooms: self.on_new_event( \"room_key\", max_room_stream_token, users=users,", "Callable[[T], bool], it: Iterable[T]) -> int: \"\"\"Return the number of", "# This is used by both /sync and /events. #", "\"token\": user_stream.current_token, } ) current_token = user_stream.current_token result = await", "self.user_to_user_stream.get(user_id) if user_stream is None: current_token = self.event_sources.get_current_token() if room_ids", "all_user_streams) LaterGauge(\"synapse_notifier_listeners\", \"\", [], count_listeners) LaterGauge( \"synapse_notifier_rooms\", \"\", [], lambda:", "): \"\"\"Used by handlers to inform the notifier something has", "None]] self.clock = hs.get_clock() self.appservice_handler = hs.get_application_service_handler() self._pusher_pool = hs.get_pusherpool()", "timeout: int, callback: Callable[[StreamToken, StreamToken], Awaitable[T]], room_ids=None, from_token=StreamToken.START, ) ->", "represents a single client connection to the events stream. The", "work, a background thread should be started and wrapped with", "-> None: \"\"\"Used to inform replication listeners that something has", "for which we should wake up any streams that have", "room_id=event.room_id, event_type=event.type, state_key=event.get(\"state_key\"), membership=event.content.get(\"membership\"), max_room_stream_token=max_room_stream_token, extra_users=extra_users or [], ) def", "token. Args: token: The token from which we are streaming", "self.pending_new_room_events.append( _PendingRoomEventEntry( event_pos=event_pos, extra_users=extra_users or [], room_id=room_id, type=event_type, state_key=state_key, membership=membership,", "name before_id = getattr(before_token, keyname) after_id = getattr(after_token, keyname) if", "ObservableDeferred(defer.Deferred()) noify_deferred.callback(self.current_token) def remove(self, notifier: \"Notifier\"): \"\"\"Remove this listener from", "RoomStreamToken): try: self.appservice_handler.notify_interested_services(max_room_stream_token) except Exception: logger.exception(\"Error notifying application services of", "the timeout had # already expired. 
current_token = user_stream.current_token result", "# TODO(paul): Should be shared somewhere def count(func: Callable[[T], bool],", "= set(rooms) self.current_token = current_token # The last token for", "any streams that have a # token that comes before", "stream in self.user_to_user_stream.values(): if stream.count_listeners(): continue if stream.last_notified_ms < expire_before_ts:", "user_stream.current_token result = await callback(prev_token, current_token) return result async def", "time we get poked. # We start it at the", "notifying the client streams. \"\"\" self.pending_new_room_events.append( _PendingRoomEventEntry( event_pos=event_pos, extra_users=extra_users or", "LaterGauge(\"synapse_notifier_listeners\", \"\", [], count_listeners) LaterGauge( \"synapse_notifier_rooms\", \"\", [], lambda: count(bool,", "= attr.ib(type=Optional[str]) class Notifier: \"\"\"This class is responsible for notifying", "prev_token, } ) with PreserveLoggingContext(): await listener.deferred log_kv( { \"wait_for_events\":" ]
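# The wait loop sketched below mirrors the wait_for_events pattern described
# above: park on a shared wake-up primitive, re-run
# callback(prev_token, current_token) after each wake-up, return the first
# truthy result, and do one final poll once the timeout expires. This is a
# self-contained asyncio toy with hypothetical names (ToyStream,
# got_anything), not Synapse's Twisted-based implementation.

import asyncio
from typing import Awaitable, Callable, Optional, TypeVar

R = TypeVar("R")


class ToyStream:
    """Toy stand-in for a per-user stream: one asyncio.Event per wake-up."""

    def __init__(self) -> None:
        self.current_token = 0
        self._wakeup = asyncio.Event()

    def notify(self, new_token: int) -> None:
        # Advance the token first, then wake everyone parked on the old event
        # and install a fresh event for the next round of waiters.
        self.current_token = new_token
        wakeup, self._wakeup = self._wakeup, asyncio.Event()
        wakeup.set()

    async def wait_for_events(
        self,
        timeout: float,
        callback: Callable[[int, int], Awaitable[Optional[R]]],
        from_token: int = 0,
    ) -> Optional[R]:
        prev_token = from_token
        loop = asyncio.get_running_loop()
        deadline = loop.time() + timeout
        while True:
            wakeup = self._wakeup  # capture before reading the token
            current_token = self.current_token
            result = await callback(prev_token, current_token)
            if result:
                return result
            prev_token = current_token  # nothing new below current_token
            remaining = deadline - loop.time()
            if remaining <= 0:
                break
            try:
                await asyncio.wait_for(wakeup.wait(), remaining)
            except asyncio.TimeoutError:
                break
        # Timeout expired: one final poll against the latest token.
        return await callback(prev_token, self.current_token)


async def _demo() -> None:
    stream = ToyStream()

    async def got_anything(prev: int, cur: int) -> Optional[str]:
        return f"events {prev}..{cur}" if cur > prev else None

    asyncio.get_running_loop().call_later(0.05, stream.notify, 7)
    assert await stream.wait_for_events(1.0, got_anything) == "events 0..7"


asyncio.run(_demo())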
[ "prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price - voucher_amount def", "= Money(5, currency) # when total = base_checkout_total(subtotal, shipping_price, discount,", "assert checkout_line_info.voucher # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[]", "voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.save() voucher_amount = Money(Decimal(3),", ") sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_price = expected_undiscounted_unit_price - sale_discount_amount assert", "= Money(price_override, currency) product_collections = set(pc.id for pc in checkout_line_info.collections)", ") # then expected_voucher_amount = Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_price = variant.get_price(", "= quantity checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT", "channel_USD ): # given quantity = 3 checkout_line = checkout_with_single_item.lines.first()", "checkout_line_info.channel_listing.currency expected_undiscounted_price = Money(price_override, currency) product_collections = set(pc.id for pc", "checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher #", "calculating line total. assert ( prices_data.price_with_discounts == (expected_unit_price * quantity)", "== expected_unit_price * quantity # apply once per order is", "sale_discount = get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount", "= TaxedMoney(net=Money(3, \"USD\"), gross=Money(0, \"USD\")) assert base_tax_rate(price) == Decimal(\"0.0\") def", ") # then expected_undiscounted_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing,", "assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price - expected_voucher_amount", "discounts=[] ) # then expected_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel,", "prices_data.price_with_discounts == expected_price - expected_voucher_amount def test_calculate_base_line_unit_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD", "= base_checkout_total(subtotal, shipping_price, discount, currency) expected = subtotal + shipping_price", "checkout_line.quantity = quantity checkout_line.save() checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info =", "assert ( prices_data.price_with_discounts == (expected_unit_price * quantity) - expected_voucher_amount )", "Decimal(\"20.00\") checkout_line.price_override = price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order", "- expected_voucher_amount ) def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher( checkout_with_single_item, 
discount_info, category, voucher, channel_USD", ") def test_base_tax_rate_net_price_zero(): price = TaxedMoney(net=Money(0, \"USD\"), gross=Money(3, \"USD\")) assert", "= Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing,", "voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount = voucher_amount", "assert prices_data.undiscounted_price == expected_undiscounted_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts", "prices_data.price_with_discounts == expected_price - voucher_amount def test_calculate_base_line_total_price(checkout_with_single_item): # given quantity", "expected_unit_price * quantity assert ( prices_data.price_with_discounts == (expected_unit_price - expected_voucher_amount)", "discounts=[discount_info] ) # then currency = checkout_line_info.channel_listing.currency expected_undiscounted_price = Money(price_override,", "calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then currency = checkout_line_info.channel_listing.currency", "# then currency = checkout_line_info.channel_listing.currency expected_price = Money(price_override, currency) expected_voucher_amount", "== expected_price assert prices_data.price_with_sale == expected_price # apply once per", "= sale_discount(expected_undiscounted_price) expected_price = expected_undiscounted_price - sale_discount_amount assert prices_data.undiscounted_price ==", "quantity assert prices_data.price_with_sale == expected_price * quantity assert prices_data.price_with_discounts ==", "def test_calculate_base_line_unit_price_with_variant_on_sale_custom_price( checkout_with_single_item, discount_info, category ): # given line =", "discounts=[] ) # then currency = checkout_line_info.channel_listing.currency expected_price = Money(price_override,", "currency = checkout_line_info.channel_listing.currency expected_price = Money(price_override, currency) assert prices_data.undiscounted_price ==", "# when prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) #", "is applied when calculating line total. 
assert ( prices_data.price_with_discounts ==", "quantity assert ( prices_data.price_with_discounts == (expected_unit_price - voucher_amount) * quantity", "expected_unit_price * quantity assert prices_data.price_with_sale == expected_unit_price * quantity #", "gross=Money(0, currency)) subtotal = TaxedMoney(net=Money(10, currency), gross=Money(12, currency)) shipping_price =", "product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_price =", "quantity) - expected_voucher_amount ) def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info, category, voucher,", "* quantity assert prices_data.price_with_sale == expected_unit_price * quantity assert (", "prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price # apply once", "3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_lines_info, _", "DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value = Decimal(10) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value =", "Money(price_override, currency) product_collections = set(pc.id for pc in checkout_line_info.collections) _,", "= variant.product # when prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info]", "expected_voucher_amount ) def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info, category, voucher, channel_USD ):", "channel=checkout_with_single_item.channel, variant_id=variant.id, ) expected_price = sale_discount(expected_undiscounted_price) assert prices_data.undiscounted_price == expected_undiscounted_price", "= get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount =", "then expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], )", "checkout_line_info.product = variant.product # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel,", "= taxed_money shipping_price = taxed_money discount = Money(5, currency) #", "= voucher_amount voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)", "checkout_line_info.channel_listing.currency expected_price = Money(price_override, currency) assert prices_data.undiscounted_price == expected_price assert", "prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_custom_price(checkout_with_single_item): # given line = checkout_with_single_item.lines.first()", "test_calculate_base_line_total_price(checkout_with_single_item): # given quantity = 3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity", "prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_discounts_once_per_order_custom_prices( checkout_with_single_item, voucher, 
channel_USD ): #", "voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount = voucher_amount voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code", ") product_collections = set(pc.id for pc in checkout_line_info.collections) _, sale_discount", "assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info, category, voucher,", "import DiscountValueType, VoucherType from ...discount.utils import get_product_discount_on_sale from ..base_calculations import", "checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant # when", "test_calculate_base_line_unit_price_with_variant_on_sale_custom_price( checkout_with_single_item, discount_info, category ): # given line = checkout_with_single_item.lines.first()", "checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount", "prices_data.price_with_discounts == (expected_unit_price - voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher_applied_once(", "= VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value = Decimal(10) voucher_channel_listing", "= VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD)", "then currency = checkout_line_info.channel_listing.currency expected_price = Money(price_override, currency) expected_voucher_amount =", "when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then", "shipping_price = taxed_money discount = Money(5, currency) # when total", "calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then currency = checkout_line_info.channel_listing.currency", "decimal import Decimal from prices import Money, TaxedMoney from ...discount", "test_calculate_base_line_unit_price_with_fixed_voucher_custom_prices( checkout_with_single_item, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first()", "from ..base_calculations import ( base_checkout_total, base_tax_rate, calculate_base_line_total_price, calculate_base_line_unit_price, ) from", "True voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount", "discount_info, category ): # given checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info", "price_override line.save(update_fields=[\"price_override\"]) checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert", "prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_fixed_voucher( checkout_with_single_item,", "* quantity def test_calculate_base_line_total_price_with_fixed_voucher( checkout_with_single_item, voucher, channel_USD ): # given", "= fetch_checkout_lines(checkout_with_single_item) 
checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant", "prices_data.undiscounted_price == expected_undiscounted_unit_price * quantity assert prices_data.price_with_sale == expected_unit_price *", "expected_price = expected_undiscounted_price - sale_discount_amount assert prices_data.undiscounted_price == expected_undiscounted_price assert", "quantity checkout_line.save() checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert", "== expected_price def test_calculate_base_line_unit_price_with_custom_price(checkout_with_single_item): # given line = checkout_with_single_item.lines.first() price_override", "def test_base_tax_rate_net_price_zero(): price = TaxedMoney(net=Money(0, \"USD\"), gross=Money(3, \"USD\")) assert base_tax_rate(price)", "voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type", "def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher_applied_once( checkout_with_single_item, discount_info, category, voucher, channel_USD ): # given", "# then expected_voucher_amount = Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_price = variant.get_price( product=checkout_line_info.product,", "import ( base_checkout_total, base_tax_rate, calculate_base_line_total_price, calculate_base_line_unit_price, ) from ..fetch import", "product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_price", "prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_custom_price( checkout_with_single_item,", "once per order is applied when calculating line total. 
assert", "channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_unit_price * quantity assert", "= base_checkout_total(subtotal, shipping_price, discount, currency) # then assert total ==", "\"USD\" zero_taxed_money = TaxedMoney(net=Money(0, currency), gross=Money(0, currency)) subtotal = TaxedMoney(net=Money(10,", "True voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value = Decimal(10) voucher_channel_listing =", "voucher_amount def test_calculate_base_line_unit_price_with_fixed_voucher_custom_prices( checkout_with_single_item, voucher, channel_USD ): # given checkout_line", "): # given checkout_line = checkout_with_single_item.lines.first() price_override = Decimal(\"20.00\") checkout_line.price_override", "prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_price", "voucher_amount voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info", "given currency = \"USD\" zero_taxed_money = TaxedMoney(net=Money(0, currency), gross=Money(0, currency))", "checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type =", "== expected_unit_price * quantity assert ( prices_data.price_with_discounts == (expected_unit_price *", "VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount", "prices_data.price_with_sale == expected_price # apply once per order is applied", "== (expected_unit_price - voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_percentage_voucher( checkout_with_single_item,", "checkout_lines_info[0] assert not checkout_line_info.voucher # when prices_data = calculate_base_line_unit_price( checkout_line_info,", "total = base_checkout_total(subtotal, shipping_price, discount, currency) # then assert total", "100, checkout_with_single_item.currency ) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale ==", "= checkout_line_info.variant # when prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[]", "voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value = Decimal(10)", "test_calculate_base_line_total_price_with_variant_on_sale_and_voucher_applied_once( checkout_with_single_item, discount_info, category, voucher, channel_USD ): # given quantity", ") assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price assert", "- voucher_amount ) def test_base_tax_rate_net_price_zero(): price = TaxedMoney(net=Money(0, \"USD\"), gross=Money(3,", "test_base_checkout_total(): # given currency = \"USD\" taxed_money = TaxedMoney(net=Money(10, currency),", "= VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value", "expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_custom_price( checkout_with_single_item, discount_info, 
category ): # given line", "discounts=[], ) assert prices_data.undiscounted_price == expected_price * quantity assert prices_data.price_with_sale", "* quantity assert prices_data.price_with_sale == expected_unit_price * quantity # apply", "line total. assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info,", "checkout_with_single_item.lines.first() price_override = Decimal(\"20.00\") checkout_line.price_override = price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type", "= Decimal(10) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value = voucher_percent_value voucher_channel_listing.save() checkout_with_single_item.voucher_code", "checkout_line.price_override = price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount", "checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type = DiscountValueType.PERCENTAGE", "checkout_with_single_item, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first() price_override", "expected_undiscounted_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price -", "checkout_line = checkout_with_single_item.lines.first() price_override = Decimal(\"20.00\") checkout_line.price_override = price_override checkout_line.save(update_fields=[\"price_override\"])", "= DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value = Decimal(10) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value", "assert prices_data.price_with_sale == expected_price * quantity assert prices_data.price_with_discounts == expected_price", "gross=Money(3, \"USD\")) assert base_tax_rate(price) == Decimal(\"0.0\") def test_base_tax_rate_gross_price_zero(): price =", "# given checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save()", "= checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save()", "product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_price", "voucher.apply_once_per_order = True voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing =", "# given line = checkout_with_single_item.lines.first() price_override = Decimal(\"12.22\") line.price_override =", "product_collections = set(pc.id for pc in checkout_line_info.collections) _, sale_discount =", "def test_calculate_base_line_total_price_with_variant_on_sale( checkout_with_single_item, discount_info, category ): # given quantity =", "then assert total == expected def test_base_checkout_total_high_discount(): # given currency", "discounts=[], ) product_collections 
= set(pc.id for pc in checkout_line_info.collections) _,", "expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info, category, voucher, channel_USD ): #", "currency)) shipping_price = zero_taxed_money discount = Money(20, currency) # when", "checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant", "sale variant.product.category = category variant.product.save() checkout_line_info.product = variant.product # when", "voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first() price_override =", "currency) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price assert", "assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_custom_price( checkout_with_single_item, discount_info, category ):", "set(pc.id for pc in checkout_line_info.collections) _, sale_discount = get_product_discount_on_sale( product=checkout_line_info.product,", "discounts=[] ) # then expected_voucher_amount = Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_unit_price =", "checkout_with_single_item.channel, discounts=[] ) # then expected_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections,", "def test_base_tax_rate_gross_price_zero(): price = TaxedMoney(net=Money(3, \"USD\"), gross=Money(0, \"USD\")) assert base_tax_rate(price)", "* quantity assert prices_data.price_with_sale == expected_price * quantity assert prices_data.price_with_discounts", "# given checkout_line = checkout_with_single_item.lines.first() price_override = Decimal(\"20.00\") checkout_line.price_override =", "= checkout_lines_info[0] assert not checkout_line_info.voucher variant = checkout_line_info.variant # when", "assert prices_data.price_with_sale == expected_price # apply once per order is", "expected_price assert prices_data.price_with_discounts == expected_price - voucher_amount def test_calculate_base_line_total_price(checkout_with_single_item): #", "Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[],", "expected_undiscounted_price - sale_discount_amount assert prices_data.undiscounted_price == expected_undiscounted_price assert prices_data.price_with_sale ==", "def test_calculate_base_line_unit_price_with_variant_on_sale( checkout_with_single_item, discount_info, category ): # given checkout_lines_info, _", "# when total = base_checkout_total(subtotal, shipping_price, discount, currency) expected =", "when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then", "prices_data.price_with_discounts == (expected_unit_price - expected_voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_discounts_apply_once_per_order(", "assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_discounts_once_per_order_custom_prices( checkout_with_single_item, voucher, channel_USD ):", "apply once per order is applied when calculating line total.", "def 
test_calculate_base_line_unit_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info, category, voucher, channel_USD ): # given", "channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_price * quantity", "expected_price = Money(price_override, currency) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale", "expected_price - voucher_amount def test_calculate_base_line_unit_price_with_percentage_voucher( checkout_with_single_item, voucher, channel_USD ): #", "= checkout_lines_info[0] assert not checkout_line_info.voucher # when prices_data = calculate_base_line_unit_price(", ") assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price #", "assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price # apply", "expected_unit_price * quantity assert prices_data.price_with_sale == expected_unit_price * quantity assert", "checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency)", "= calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then currency =", "= fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not checkout_line_info.voucher # when", "+ shipping_price - discount # then assert total == expected", "from ...discount import DiscountValueType, VoucherType from ...discount.utils import get_product_discount_on_sale from", "given line = checkout_with_single_item.lines.first() price_override = Decimal(\"12.22\") line.price_override = price_override", "== expected_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price", "def test_base_checkout_total(): # given currency = \"USD\" taxed_money = TaxedMoney(net=Money(10,", "test_calculate_base_line_unit_price_with_fixed_voucher( checkout_with_single_item, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first()", "taxed_money shipping_price = taxed_money discount = Money(5, currency) # when", "assert prices_data.price_with_discounts == expected_price - expected_voucher_amount def test_calculate_base_line_unit_price_with_percentage_voucher_custom_prices( checkout_with_single_item, voucher,", "def test_calculate_base_line_unit_price_with_percentage_voucher( checkout_with_single_item, voucher, channel_USD ): # given checkout_line =", "assert total == expected def test_base_checkout_total_high_discount(): # given currency =", "Money(20, currency) # when total = base_checkout_total(subtotal, shipping_price, discount, currency)", "= checkout_with_single_item.lines.first() price_override = Decimal(\"20.00\") checkout_line.price_override = price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product)", "taxed_money discount = Money(5, currency) # when total = base_checkout_total(subtotal,", "shipping_price, discount, currency) expected = subtotal + shipping_price - discount", "expected_undiscounted_price = Money(price_override, currency) product_collections = set(pc.id for pc in", "when prices_data = calculate_base_line_total_price( checkout_line_info, 
checkout_with_single_item.channel, discounts=[] ) # then", "( prices_data.price_with_discounts == (expected_unit_price - voucher_amount) * quantity ) def", "total. assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info, category,", "get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_unit_price)", "zero_taxed_money = TaxedMoney(net=Money(0, currency), gross=Money(0, currency)) subtotal = TaxedMoney(net=Money(10, currency),", "* quantity def test_calculate_base_line_total_price_with_variant_on_sale( checkout_with_single_item, discount_info, category ): # given", "= Decimal(\"20.00\") checkout_line.price_override = price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT", "calculating line total. assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_discounts_once_per_order_custom_prices( checkout_with_single_item,", "assert ( prices_data.price_with_discounts == (expected_unit_price - voucher_amount) * quantity )", "- voucher_amount def test_calculate_base_line_unit_price_with_fixed_voucher_custom_prices( checkout_with_single_item, voucher, channel_USD ): # given", "voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing =", "sale_discount_amount = sale_discount(expected_undiscounted_price) expected_price = expected_undiscounted_price - sale_discount_amount assert prices_data.undiscounted_price", "currency), gross=Money(0, currency)) subtotal = TaxedMoney(net=Money(10, currency), gross=Money(12, currency)) shipping_price", "checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not checkout_line_info.voucher", "when calculating line total. 
assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_and_voucher(", "== expected_price - voucher_amount def test_calculate_base_line_unit_price_with_percentage_voucher( checkout_with_single_item, voucher, channel_USD ):", "= TaxedMoney(net=Money(10, currency), gross=Money(10, currency)) subtotal = taxed_money shipping_price =", "_, sale_discount = get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, )", "expected_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price -", "then expected_voucher_amount = Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections,", "calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_price = variant.get_price(", "checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0]", "== Decimal(\"0.0\") def test_base_checkout_total(): # given currency = \"USD\" taxed_money", "prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then currency", "expected_price - expected_voucher_amount def test_calculate_base_line_unit_price_with_percentage_voucher_custom_prices( checkout_with_single_item, voucher, channel_USD ): #", "prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_voucher_amount", "(expected_unit_price * quantity) - expected_voucher_amount ) def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info,", "get_product_discount_on_sale from ..base_calculations import ( base_checkout_total, base_tax_rate, calculate_base_line_total_price, calculate_base_line_unit_price, )", "discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) expected_price = sale_discount(expected_undiscounted_price) assert prices_data.undiscounted_price ==", "checkout_line_info = checkout_lines_info[0] assert not checkout_line_info.voucher variant = checkout_line_info.variant #", "== Decimal(\"0.0\") def test_base_tax_rate_gross_price_zero(): price = TaxedMoney(net=Money(3, \"USD\"), gross=Money(0, \"USD\"))", "..fetch import fetch_checkout_lines def test_calculate_base_line_unit_price(checkout_with_single_item): # given checkout_lines_info, _ =", "quantity ) def test_calculate_base_line_total_price_with_percentage_voucher( checkout_with_single_item, voucher, channel_USD ): # given", "order is applied when calculating line total. 
assert ( prices_data.price_with_discounts", "discounts=[] ) # then expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel,", "from decimal import Decimal from prices import Money, TaxedMoney from", "= checkout_line_info.variant # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[]", "== expected_price def test_calculate_base_line_unit_price_with_variant_on_sale( checkout_with_single_item, discount_info, category ): # given", "discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_price) expected_price = expected_undiscounted_price", "channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_price = expected_undiscounted_unit_price -", "checkout_with_single_item, discount_info, category ): # given quantity = 3 checkout_line", "checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type", "= price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True", "sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_unit_price = expected_undiscounted_unit_price - sale_discount_amount assert prices_data.undiscounted_price", "currency)) subtotal = taxed_money shipping_price = taxed_money discount = Money(5,", "subtotal = TaxedMoney(net=Money(10, currency), gross=Money(12, currency)) shipping_price = zero_taxed_money discount", "= zero_taxed_money discount = Money(20, currency) # when total =", "checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True", "checkout_with_single_item.channel, discounts=[discount_info] ) # then currency = checkout_line_info.channel_listing.currency expected_undiscounted_price =", "= calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_unit_price =", "- voucher_amount def test_calculate_base_line_total_price(checkout_with_single_item): # given quantity = 3 checkout_line", "- voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_percentage_voucher( checkout_with_single_item, voucher, channel_USD", "not checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data = calculate_base_line_unit_price(", "...discount.utils import get_product_discount_on_sale from ..base_calculations import ( base_checkout_total, base_tax_rate, calculate_base_line_total_price,", "product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) expected_price = sale_discount(expected_undiscounted_price) assert prices_data.undiscounted_price", "given checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order =", "# then currency = checkout_line_info.channel_listing.currency expected_undiscounted_price = 
Money(price_override, currency) product_collections", "checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant # set category on", "prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_custom_price(checkout_with_single_item): #", "assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_custom_price(checkout_with_single_item):", "TaxedMoney(net=Money(10, currency), gross=Money(12, currency)) shipping_price = zero_taxed_money discount = Money(20,", "* quantity) - voucher_amount ) def test_base_tax_rate_net_price_zero(): price = TaxedMoney(net=Money(0,", "expected_price def test_calculate_base_line_unit_price_with_custom_price(checkout_with_single_item): # given line = checkout_with_single_item.lines.first() price_override =", "currency) expected = subtotal + shipping_price - discount # then", "quantity checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type", "checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher # when prices_data = calculate_base_line_unit_price(", "checkout_with_single_item.currency) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount = voucher_amount voucher_channel_listing.save() checkout_with_single_item.voucher_code =", "== expected_price assert prices_data.price_with_discounts == expected_price - voucher_amount def test_calculate_base_line_total_price(checkout_with_single_item):", "voucher_amount ) def test_base_tax_rate_net_price_zero(): price = TaxedMoney(net=Money(0, \"USD\"), gross=Money(3, \"USD\"))", "= TaxedMoney(net=Money(0, \"USD\"), gross=Money(3, \"USD\")) assert base_tax_rate(price) == Decimal(\"0.0\") def", "discounts=[], ) assert prices_data.undiscounted_price == expected_unit_price * quantity assert prices_data.price_with_sale", "expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_custom_price( checkout_with_single_item, discount_info, category", "checkout_line_info.product = variant.product # when prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel,", "calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_price = variant.get_price(", "== expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale( checkout_with_single_item, discount_info,", "voucher_amount def test_calculate_base_line_total_price(checkout_with_single_item): # given quantity = 3 checkout_line =", "= Money(20, currency) # when total = base_checkout_total(subtotal, shipping_price, discount,", "variant = checkout_line_info.variant # set category on sale variant.product.category =", "prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then expected_undiscounted_unit_price", "# when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) #", "Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing = 
voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount = voucher_amount voucher_channel_listing.save() checkout_with_single_item.voucher_code", "not checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data = calculate_base_line_total_price(", "checkout_line_info.collections) _, sale_discount = get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id,", "== expected_price - expected_voucher_amount def test_calculate_base_line_unit_price_with_percentage_voucher_custom_prices( checkout_with_single_item, voucher, channel_USD ):", "# given quantity = 3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity =", "prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then currency", "== expected_price # apply once per order is applied when", "( base_checkout_total, base_tax_rate, calculate_base_line_total_price, calculate_base_line_unit_price, ) from ..fetch import fetch_checkout_lines", "TaxedMoney from ...discount import DiscountValueType, VoucherType from ...discount.utils import get_product_discount_on_sale", "then expected_undiscounted_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], )", "set category on sale variant.product.category = category variant.product.save() checkout_line_info.product =", "then currency = checkout_line_info.channel_listing.currency expected_price = Money(price_override, currency) assert prices_data.undiscounted_price", "Money(price_override, currency) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price", "= price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount =", "def test_calculate_base_line_unit_price_with_percentage_voucher_custom_prices( checkout_with_single_item, voucher, channel_USD ): # given checkout_line =", "voucher_amount def test_calculate_base_line_unit_price_with_percentage_voucher( checkout_with_single_item, voucher, channel_USD ): # given checkout_line", "category variant.product.save() checkout_line_info.product = variant.product # when prices_data = calculate_base_line_total_price(", "= checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount = Money(Decimal(3),", "variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_unit_price = expected_undiscounted_unit_price - sale_discount_amount", "test_calculate_base_line_total_price_with_variant_on_sale( checkout_with_single_item, discount_info, category ): # given quantity = 3", "checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.discount_value_type =", "then expected_undiscounted_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, 
discounts=[], )", "line total. assert ( prices_data.price_with_discounts == (expected_unit_price * quantity) -", "= Money(price_override, currency) expected_voucher_amount = Money( price_override * voucher_percent_value /", "quantity checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save()", "expected_unit_price = expected_undiscounted_unit_price - sale_discount_amount assert prices_data.undiscounted_price == expected_undiscounted_unit_price *", "checkout_with_single_item.channel, discounts=[] ) # then expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections,", "assert checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data = calculate_base_line_unit_price(", "variant.product.category = category variant.product.save() checkout_line_info.product = variant.product # when prices_data", "price_override * voucher_percent_value / 100, checkout_with_single_item.currency ) assert prices_data.undiscounted_price ==", ") def test_calculate_base_line_total_price_with_percentage_voucher( checkout_with_single_item, voucher, channel_USD ): # given quantity", "checkout_with_single_item.channel, discounts=[] ) # then expected_voucher_amount = Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_unit_price", "== (expected_unit_price - voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher_applied_once( checkout_with_single_item,", "\"USD\")) assert base_tax_rate(price) == Decimal(\"0.0\") def test_base_checkout_total(): # given currency", "prices_data.price_with_sale == expected_price * quantity assert prices_data.price_with_discounts == expected_price *", "line.save(update_fields=[\"price_override\"]) checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not", "* quantity ) def test_calculate_base_line_total_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD ): #", "currency) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price #", "expected_price # apply once per order is applied when calculating", "quantity = 3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save()", "\"USD\" taxed_money = TaxedMoney(net=Money(10, currency), gross=Money(10, currency)) subtotal = taxed_money", "quantity # apply once per order is applied when calculating", "VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing", "shipping_price - discount # then assert total == expected def", "prices_data.price_with_discounts == expected_price - voucher_amount def test_calculate_base_line_unit_price_with_percentage_voucher( checkout_with_single_item, voucher, channel_USD", "discount_info, category, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first()", "when prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then", "channel_USD ): # given checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type =", "sale_discount = get_product_discount_on_sale( 
product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) expected_price", "* quantity # apply once per order is applied when", "# given currency = \"USD\" taxed_money = TaxedMoney(net=Money(10, currency), gross=Money(10,", "expected_price = Money(price_override, currency) expected_voucher_amount = Money( price_override * voucher_percent_value", "total. assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_discounts_once_per_order_custom_prices( checkout_with_single_item, voucher, channel_USD", "currency = checkout_line_info.channel_listing.currency expected_undiscounted_price = Money(price_override, currency) product_collections = set(pc.id", "= sale_discount(expected_undiscounted_price) assert prices_data.undiscounted_price == expected_undiscounted_price assert prices_data.price_with_sale == expected_price", "Decimal(\"12.22\") line.price_override = price_override line.save(update_fields=[\"price_override\"]) checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info", "sale_discount_amount assert prices_data.undiscounted_price == expected_undiscounted_price assert prices_data.price_with_sale == expected_price assert", "Decimal(\"0.0\") def test_base_tax_rate_gross_price_zero(): price = TaxedMoney(net=Money(3, \"USD\"), gross=Money(0, \"USD\")) assert", "..base_calculations import ( base_checkout_total, base_tax_rate, calculate_base_line_total_price, calculate_base_line_unit_price, ) from ..fetch", "checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then expected_undiscounted_price = variant.get_price( product=checkout_line_info.product,", "voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value = voucher_percent_value voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code", "checkout_line.save() checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not", "_ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not checkout_line_info.voucher variant", "Money( price_override * voucher_percent_value / 100, checkout_with_single_item.currency ) assert prices_data.undiscounted_price", ") def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher_applied_once( checkout_with_single_item, discount_info, category, voucher, channel_USD ): #", "base_tax_rate(price) == Decimal(\"0.0\") def test_base_checkout_total(): # given currency = \"USD\"", "expected_price def test_calculate_base_line_unit_price_with_fixed_voucher( checkout_with_single_item, voucher, channel_USD ): # given checkout_line", "test_base_checkout_total_high_discount(): # given currency = \"USD\" zero_taxed_money = TaxedMoney(net=Money(0, currency),", "test_calculate_base_line_unit_price_with_custom_price(checkout_with_single_item): # given line = checkout_with_single_item.lines.first() price_override = Decimal(\"12.22\") line.price_override", "quantity assert ( prices_data.price_with_discounts == (expected_unit_price * quantity) - voucher_amount", "Money, TaxedMoney from ...discount import DiscountValueType, VoucherType from ...discount.utils import", "voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value = Decimal(10) voucher_channel_listing = 
voucher.channel_listings.get(channel=channel_USD)", "expected_price def test_calculate_base_line_unit_price_with_discounts_once_per_order_custom_prices( checkout_with_single_item, voucher, channel_USD ): # given checkout_line", "checkout_with_single_item.channel, discounts=[discount_info] ) # then expected_undiscounted_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections,", ") # then expected_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing,", "checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value", "checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_voucher_amount = Money(Decimal(\"1\"), checkout_with_single_item.currency)", "( prices_data.price_with_discounts == (expected_unit_price * quantity) - voucher_amount ) def", "expected_voucher_amount def test_calculate_base_line_unit_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD ): # given checkout_line", "gross=Money(0, \"USD\")) assert base_tax_rate(price) == Decimal(\"0.0\") def test_base_checkout_total(): # given", "then expected_voucher_amount = Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections,", "= checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product)", "variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) product_collections = set(pc.id", "expected_voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD ):", "def test_calculate_base_line_unit_price_with_fixed_voucher( checkout_with_single_item, voucher, channel_USD ): # given checkout_line =", "expected_undiscounted_unit_price * quantity assert prices_data.price_with_sale == expected_unit_price * quantity assert", "= checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data", "expected_voucher_amount = Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel,", "channel_listing=checkout_line_info.channel_listing, discounts=[], ) product_collections = set(pc.id for pc in checkout_line_info.collections)", "from ...discount.utils import get_product_discount_on_sale from ..base_calculations import ( base_checkout_total, base_tax_rate,", "): # given line = checkout_with_single_item.lines.first() price_override = Decimal(\"20.00\") line.price_override", "voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing", "def 
test_calculate_base_line_total_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD ): # given quantity =", "channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale", "product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_unit_price =", "# then expected_voucher_amount = Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_unit_price = variant.get_price( product=checkout_line_info.product,", "checkout_with_single_item.channel, discounts=[discount_info] ) # then expected_undiscounted_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections,", "checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value", "price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.discount_value_type", "# then assert total == expected def test_base_checkout_total_high_discount(): # given", "_ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher # when", "# given checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert", "* quantity assert prices_data.price_with_discounts == expected_price * quantity def test_calculate_base_line_total_price_with_fixed_voucher(", "quantity assert prices_data.price_with_discounts == expected_price * quantity def test_calculate_base_line_total_price_with_fixed_voucher( checkout_with_single_item,", "# then currency = checkout_line_info.channel_listing.currency expected_price = Money(price_override, currency) assert", "- expected_voucher_amount def test_calculate_base_line_unit_price_with_percentage_voucher_custom_prices( checkout_with_single_item, voucher, channel_USD ): # given", "expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_fixed_voucher( checkout_with_single_item, voucher, channel_USD", "DiscountValueType, VoucherType from ...discount.utils import get_product_discount_on_sale from ..base_calculations import (", "checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order =", "category ): # given quantity = 3 checkout_line = checkout_with_single_item.lines.first()", "# then expected_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[],", "base_checkout_total, base_tax_rate, calculate_base_line_total_price, calculate_base_line_unit_price, ) from ..fetch import fetch_checkout_lines def", "# given checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT 
voucher.apply_once_per_order", "collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) product_collections = set(pc.id for pc", "= \"USD\" zero_taxed_money = TaxedMoney(net=Money(0, currency), gross=Money(0, currency)) subtotal =", "channel_USD ): # given checkout_line = checkout_with_single_item.lines.first() price_override = Decimal(\"20.00\")", "assert prices_data.price_with_sale == expected_unit_price * quantity # apply once per", "discount_info, category, voucher, channel_USD ): # given quantity = 3", "product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_price) expected_price =", "assert ( prices_data.price_with_discounts == (expected_unit_price - expected_voucher_amount) * quantity )", "* voucher_percent_value / 100, checkout_with_single_item.currency ) assert prices_data.undiscounted_price == expected_price", "test_calculate_base_line_unit_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first()", "# then expected_undiscounted_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[],", "base_checkout_total(subtotal, shipping_price, discount, currency) expected = subtotal + shipping_price -", "= 3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_line", "checkout_with_single_item.channel, discounts=[] ) # then expected_voucher_amount = Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_price", "given currency = \"USD\" taxed_money = TaxedMoney(net=Money(10, currency), gross=Money(10, currency))", "= True voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value = Decimal(10) voucher_channel_listing", "== expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_custom_price( checkout_with_single_item, discount_info, category ): # given", "base_tax_rate(price) == Decimal(\"0.0\") def test_base_tax_rate_gross_price_zero(): price = TaxedMoney(net=Money(3, \"USD\"), gross=Money(0,", "line = checkout_with_single_item.lines.first() price_override = Decimal(\"20.00\") line.price_override = price_override line.save(update_fields=[\"price_override\"])", ") sale_discount_amount = sale_discount(expected_undiscounted_price) expected_price = expected_undiscounted_price - sale_discount_amount assert", "variant_id=variant.id, ) expected_price = sale_discount(expected_undiscounted_price) assert prices_data.undiscounted_price == expected_undiscounted_price assert", ") def test_calculate_base_line_total_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD ): # given quantity", "_ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant =", "assert not checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data =", "from ..fetch import fetch_checkout_lines def test_calculate_base_line_unit_price(checkout_with_single_item): # given checkout_lines_info, _", "def test_calculate_base_line_total_price_with_fixed_voucher( 
checkout_with_single_item, voucher, channel_USD ): # given quantity =", "checkout_with_single_item.currency) expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], )", "checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.save() voucher_amount", "= voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value = voucher_percent_value voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info,", "= sale_discount(expected_undiscounted_unit_price) expected_price = expected_undiscounted_unit_price - sale_discount_amount assert prices_data.undiscounted_price ==", "prices_data.undiscounted_price == expected_unit_price * quantity assert prices_data.price_with_sale == expected_unit_price *", "== (expected_unit_price - expected_voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_discounts_apply_once_per_order( checkout_with_single_item,", ") sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_unit_price = expected_undiscounted_unit_price - sale_discount_amount assert", "(expected_unit_price * quantity) - voucher_amount ) def test_base_tax_rate_net_price_zero(): price =", "assert prices_data.undiscounted_price == expected_undiscounted_unit_price * quantity assert prices_data.price_with_sale == expected_price", "checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency)", "== expected_price def test_calculate_base_line_unit_price_with_discounts_once_per_order_custom_prices( checkout_with_single_item, voucher, channel_USD ): # given", "price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount = Money(Decimal(3),", "== expected def test_base_checkout_total_high_discount(): # given currency = \"USD\" zero_taxed_money", "import get_product_discount_on_sale from ..base_calculations import ( base_checkout_total, base_tax_rate, calculate_base_line_total_price, calculate_base_line_unit_price,", "VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value =", "checkout_with_single_item, discount_info, category, voucher, channel_USD ): # given checkout_line =", "voucher_percent_value = Decimal(10) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value = voucher_percent_value voucher_channel_listing.save()", "= calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_price =", "expected_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert", "Money(5, currency) # when total = base_checkout_total(subtotal, shipping_price, discount, currency)", "get_product_discount_on_sale( 
product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) expected_price = sale_discount(expected_undiscounted_price)", "assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_fixed_voucher( checkout_with_single_item, voucher, channel_USD ):", "checkout_with_single_item, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product)", "checkout_with_single_item.currency ) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price", "expected_price - voucher_amount def test_calculate_base_line_unit_price_with_fixed_voucher_custom_prices( checkout_with_single_item, voucher, channel_USD ): #", "prices_data.undiscounted_price == expected_undiscounted_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts ==", "assert prices_data.price_with_discounts == expected_price - voucher_amount def test_calculate_base_line_unit_price_with_fixed_voucher_custom_prices( checkout_with_single_item, voucher,", "discount = Money(5, currency) # when total = base_checkout_total(subtotal, shipping_price,", "checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then currency = checkout_line_info.channel_listing.currency expected_undiscounted_price", "sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_price = expected_undiscounted_unit_price - sale_discount_amount assert prices_data.undiscounted_price", "assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale( checkout_with_single_item, discount_info, category ):", "when total = base_checkout_total(subtotal, shipping_price, discount, currency) # then assert", "test_calculate_base_line_unit_price_with_discounts_once_per_order_custom_prices( checkout_with_single_item, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first()", "checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_lines_info, _ =", "= calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_voucher_amount =", "currency = \"USD\" zero_taxed_money = TaxedMoney(net=Money(0, currency), gross=Money(0, currency)) subtotal", "variant.product # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] )", "variant.product # when prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] )", "calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_voucher_amount = Money(Decimal(\"1\"),", "): # given quantity = 3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity", "= checkout_lines_info[0] assert not checkout_line_info.voucher variant = checkout_line_info.variant # set", "checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data =", "TaxedMoney(net=Money(3, \"USD\"), gross=Money(0, \"USD\")) assert base_tax_rate(price) == Decimal(\"0.0\") def test_base_checkout_total():", "collections=checkout_line_info.collections, 
channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_price *", "* quantity assert prices_data.price_with_discounts == expected_price * quantity def test_calculate_base_line_total_price_with_variant_on_sale(", "category, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product)", "= TaxedMoney(net=Money(10, currency), gross=Money(12, currency)) shipping_price = zero_taxed_money discount =", "checkout_with_single_item.lines.first() price_override = Decimal(\"12.22\") line.price_override = price_override line.save(update_fields=[\"price_override\"]) checkout_lines_info, _", "expected_price - expected_voucher_amount def test_calculate_base_line_unit_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD ): #", "# set category on sale variant.product.category = category variant.product.save() checkout_line_info.product", "= variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price", "= voucher_percent_value voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)", ") # then expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing,", "== expected_price def test_calculate_base_line_unit_price_with_fixed_voucher( checkout_with_single_item, voucher, channel_USD ): # given", "= \"USD\" taxed_money = TaxedMoney(net=Money(10, currency), gross=Money(10, currency)) subtotal =", "price = TaxedMoney(net=Money(0, \"USD\"), gross=Money(3, \"USD\")) assert base_tax_rate(price) == Decimal(\"0.0\")", "expected_voucher_amount = Money( price_override * voucher_percent_value / 100, checkout_with_single_item.currency )", "sale_discount(expected_undiscounted_price) expected_price = expected_undiscounted_price - sale_discount_amount assert prices_data.undiscounted_price == expected_undiscounted_price", "expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert", "test_calculate_base_line_total_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD ): # given quantity = 3", "voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save()", "= Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount = voucher_amount voucher_channel_listing.save()", "checkout_with_single_item, voucher, channel_USD ): # given quantity = 3 checkout_line", "assert base_tax_rate(price) == Decimal(\"0.0\") def test_base_tax_rate_gross_price_zero(): price = TaxedMoney(net=Money(3, \"USD\"),", "def test_calculate_base_line_unit_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD ): # given checkout_line =", "checkout_with_single_item.channel, 
discounts=[] ) # then currency = checkout_line_info.channel_listing.currency expected_price =", "= voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert", "(expected_unit_price - voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_percentage_voucher( checkout_with_single_item, voucher,", "for pc in checkout_line_info.collections) _, sale_discount = get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections,", "applied when calculating line total. assert prices_data.price_with_discounts == expected_price def", "given quantity = 3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity", "== expected_price - voucher_amount def test_calculate_base_line_total_price(checkout_with_single_item): # given quantity =", "expected_price assert prices_data.price_with_discounts == expected_price - expected_voucher_amount def test_calculate_base_line_unit_price_with_discounts_apply_once_per_order( checkout_with_single_item,", "_ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not checkout_line_info.voucher #", "checkout_with_single_item, discount_info, category ): # given line = checkout_with_single_item.lines.first() price_override", "checkout_line.quantity = quantity checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type =", "= variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) product_collections =", "currency), gross=Money(10, currency)) subtotal = taxed_money shipping_price = taxed_money discount", "checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_price = variant.get_price( product=checkout_line_info.product,", "assert checkout_line_info.voucher variant = checkout_line_info.variant # set category on sale", "(expected_unit_price - voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher_applied_once( checkout_with_single_item, discount_info,", "# given checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type", "# apply once per order is applied when calculating line", "\"USD\"), gross=Money(3, \"USD\")) assert base_tax_rate(price) == Decimal(\"0.0\") def test_base_tax_rate_gross_price_zero(): price", "from prices import Money, TaxedMoney from ...discount import DiscountValueType, VoucherType", "discounts=[discount_info] ) # then expected_undiscounted_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel,", "checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant # set", "price_override = Decimal(\"20.00\") checkout_line.price_override = price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type =", "expected_voucher_amount def test_calculate_base_line_unit_price_with_percentage_voucher_custom_prices( checkout_with_single_item, voucher, channel_USD ): # given checkout_line", "expected_price = 
sale_discount(expected_undiscounted_price) assert prices_data.undiscounted_price == expected_undiscounted_price assert prices_data.price_with_sale ==", "= checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)", "expected_undiscounted_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) product_collections", "voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.save() voucher_amount =", "given line = checkout_with_single_item.lines.first() price_override = Decimal(\"20.00\") line.price_override = price_override", "assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_fixed_voucher(", "assert prices_data.price_with_discounts == expected_price * quantity def test_calculate_base_line_total_price_with_fixed_voucher( checkout_with_single_item, voucher,", "product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_unit_price", "get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_price)", "quantity checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order", "prices_data.undiscounted_price == expected_undiscounted_unit_price * quantity assert prices_data.price_with_sale == expected_price *", "prices import Money, TaxedMoney from ...discount import DiscountValueType, VoucherType from", "= price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type = DiscountValueType.PERCENTAGE", "== expected_price * quantity def test_calculate_base_line_total_price_with_variant_on_sale( checkout_with_single_item, discount_info, category ):", "# given currency = \"USD\" zero_taxed_money = TaxedMoney(net=Money(0, currency), gross=Money(0,", "voucher_channel_listing.discount = voucher_amount voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ =", "discount, currency) expected = subtotal + shipping_price - discount #", "discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_unit_price = expected_undiscounted_unit_price", "- voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher_applied_once( checkout_with_single_item, discount_info, category,", "= fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher # when prices_data", "def test_calculate_base_line_unit_price_with_fixed_voucher_custom_prices( 
checkout_with_single_item, voucher, channel_USD ): # given checkout_line =", "prices_data.price_with_discounts == expected_price * quantity def test_calculate_base_line_total_price_with_fixed_voucher( checkout_with_single_item, voucher, channel_USD", "when calculating line total. assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_discounts_once_per_order_custom_prices(", "- discount # then assert total == expected def test_base_checkout_total_high_discount():", "Decimal from prices import Money, TaxedMoney from ...discount import DiscountValueType,", "category variant.product.save() checkout_line_info.product = variant.product # when prices_data = calculate_base_line_unit_price(", "Decimal(\"20.00\") checkout_line.price_override = price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save()", "= checkout_with_single_item.lines.first() price_override = Decimal(\"12.22\") line.price_override = price_override line.save(update_fields=[\"price_override\"]) checkout_lines_info,", "def test_calculate_base_line_total_price_with_percentage_voucher( checkout_with_single_item, voucher, channel_USD ): # given quantity =", "= subtotal + shipping_price - discount # then assert total", "( prices_data.price_with_discounts == (expected_unit_price * quantity) - expected_voucher_amount ) def", "quantity assert prices_data.price_with_discounts == expected_price * quantity def test_calculate_base_line_total_price_with_variant_on_sale( checkout_with_single_item,", "channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_price) expected_price = expected_undiscounted_price -", "checkout_line_info.variant # set category on sale variant.product.category = category variant.product.save()", "# then expected_undiscounted_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[],", "import fetch_checkout_lines def test_calculate_base_line_unit_price(checkout_with_single_item): # given checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)", "line total. 
assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_discounts_once_per_order_custom_prices( checkout_with_single_item, voucher,", "voucher.apply_once_per_order = True voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value = Decimal(10)", "quantity assert ( prices_data.price_with_discounts == (expected_unit_price - expected_voucher_amount) * quantity", ") assert prices_data.undiscounted_price == expected_unit_price * quantity assert prices_data.price_with_sale ==", "test_calculate_base_line_unit_price_with_variant_on_sale( checkout_with_single_item, discount_info, category ): # given checkout_lines_info, _ =", ") def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info, category, voucher, channel_USD ): #", "= True voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD)", "product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) expected_price = sale_discount(expected_undiscounted_price) assert", "voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info =", "prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then expected_undiscounted_price", "= Money(price_override, currency) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale ==", "prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_fixed_voucher( checkout_with_single_item, voucher, channel_USD ): #", "prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale( checkout_with_single_item, discount_info, category ): #", "calculate_base_line_total_price, calculate_base_line_unit_price, ) from ..fetch import fetch_checkout_lines def test_calculate_base_line_unit_price(checkout_with_single_item): #", "channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale ==", "quantity def test_calculate_base_line_total_price_with_fixed_voucher( checkout_with_single_item, voucher, channel_USD ): # given quantity", "expected_price def test_calculate_base_line_unit_price_with_variant_on_sale( checkout_with_single_item, discount_info, category ): # given checkout_lines_info,", "- sale_discount_amount assert prices_data.undiscounted_price == expected_undiscounted_unit_price * quantity assert prices_data.price_with_sale", "expected_unit_price * quantity assert ( prices_data.price_with_discounts == (expected_unit_price - voucher_amount)", "collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_price assert", "- expected_voucher_amount def test_calculate_base_line_unit_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD ): # given", "checkout_line.price_override = price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) 
voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order =", "base_checkout_total(subtotal, shipping_price, discount, currency) # then assert total == zero_taxed_money", "prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_unit_price", "* quantity) - expected_voucher_amount ) def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info, category,", "currency = checkout_line_info.channel_listing.currency expected_price = Money(price_override, currency) expected_voucher_amount = Money(", "channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_unit_price = expected_undiscounted_unit_price -", "variant.product.save() checkout_line_info.product = variant.product # when prices_data = calculate_base_line_unit_price( checkout_line_info,", "expected_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price def", "== (expected_unit_price * quantity) - expected_voucher_amount ) def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher( checkout_with_single_item,", "discounts=[] ) # then expected_voucher_amount = Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_price =", "discounts=[], ) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price", "checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.discount_value_type =", "== expected_price * quantity assert prices_data.price_with_discounts == expected_price * quantity", "\"USD\"), gross=Money(0, \"USD\")) assert base_tax_rate(price) == Decimal(\"0.0\") def test_base_checkout_total(): #", "quantity assert prices_data.price_with_sale == expected_unit_price * quantity # apply once", "checkout_line_info.voucher # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] )", "prices_data.price_with_sale == expected_unit_price * quantity # apply once per order", "variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_price) expected_price = expected_undiscounted_price - sale_discount_amount", "variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price ==", "= price_override line.save(update_fields=[\"price_override\"]) checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0]", "channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) product_collections = set(pc.id for pc in", "checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data = calculate_base_line_unit_price( checkout_line_info,", "def test_calculate_base_line_total_price(checkout_with_single_item): # given quantity = 3 checkout_line = checkout_with_single_item.lines.first()", "= get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, 
variant_id=variant.id, ) expected_price =", "calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_unit_price = variant.get_price(", "* quantity assert ( prices_data.price_with_discounts == (expected_unit_price * quantity) -", "* quantity assert ( prices_data.price_with_discounts == (expected_unit_price - expected_voucher_amount) *", "price_override = Decimal(\"12.22\") line.price_override = price_override line.save(update_fields=[\"price_override\"]) checkout_lines_info, _ =", "= checkout_line_info.channel_listing.currency expected_undiscounted_price = Money(price_override, currency) product_collections = set(pc.id for", "taxed_money = TaxedMoney(net=Money(10, currency), gross=Money(10, currency)) subtotal = taxed_money shipping_price", "= variant.product # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info]", "== expected_price - voucher_amount def test_calculate_base_line_unit_price_with_fixed_voucher_custom_prices( checkout_with_single_item, voucher, channel_USD ):", "== expected_price - expected_voucher_amount def test_calculate_base_line_unit_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD ):", "quantity def test_calculate_base_line_total_price_with_variant_on_sale( checkout_with_single_item, discount_info, category ): # given quantity", "checkout_line_info.variant # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] )", "= category variant.product.save() checkout_line_info.product = variant.product # when prices_data =", "def test_calculate_base_line_unit_price_with_discounts_once_per_order_custom_prices( checkout_with_single_item, voucher, channel_USD ): # given checkout_line =", "checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount =", "test_calculate_base_line_unit_price_with_percentage_voucher( checkout_with_single_item, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first()", "calculating line total. 
assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_and_voucher( checkout_with_single_item,", "in checkout_line_info.collections) _, sale_discount = get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel,", "category, voucher, channel_USD ): # given quantity = 3 checkout_line", "voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher_applied_once( checkout_with_single_item, discount_info, category, voucher,", "= VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency)", "* quantity ) def test_calculate_base_line_total_price_with_percentage_voucher( checkout_with_single_item, voucher, channel_USD ): #", "sale_discount(expected_undiscounted_unit_price) expected_price = expected_undiscounted_unit_price - sale_discount_amount assert prices_data.undiscounted_price == expected_undiscounted_unit_price", "prices_data.price_with_discounts == (expected_unit_price - voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_percentage_voucher(", "total = base_checkout_total(subtotal, shipping_price, discount, currency) expected = subtotal +", "product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_unit_price", "= calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_voucher_amount =", "= expected_undiscounted_unit_price - sale_discount_amount assert prices_data.undiscounted_price == expected_undiscounted_unit_price * quantity", "voucher.save() voucher_percent_value = Decimal(10) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value = voucher_percent_value", "fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not checkout_line_info.voucher # when prices_data", "subtotal = taxed_money shipping_price = taxed_money discount = Money(5, currency)", "channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_unit_price * quantity", "voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher", "prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price - expected_voucher_amount def", "discount_info, category ): # given line = checkout_with_single_item.lines.first() price_override =", "<reponame>nestfiy/saleor<gh_stars>0 from decimal import Decimal from prices import Money, TaxedMoney", "== expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info, category, voucher, channel_USD ):", "checkout_with_single_item, discount_info, category ): # given checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)", "Decimal(\"20.00\") checkout_line.price_override = price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) 
voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type", "- sale_discount_amount assert prices_data.undiscounted_price == expected_undiscounted_price assert prices_data.price_with_sale == expected_price", "test_base_tax_rate_net_price_zero(): price = TaxedMoney(net=Money(0, \"USD\"), gross=Money(3, \"USD\")) assert base_tax_rate(price) ==", "sale_discount(expected_undiscounted_price) assert prices_data.undiscounted_price == expected_undiscounted_price assert prices_data.price_with_sale == expected_price assert", "expected_voucher_amount = Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel,", "): # given checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT", "total. assert ( prices_data.price_with_discounts == (expected_unit_price * quantity) - expected_voucher_amount", "def test_calculate_base_line_unit_price(checkout_with_single_item): # given checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info =", ") # then currency = checkout_line_info.channel_listing.currency expected_price = Money(price_override, currency)", "not checkout_line_info.voucher # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[]", "...discount import DiscountValueType, VoucherType from ...discount.utils import get_product_discount_on_sale from ..base_calculations", "VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value = Decimal(10) voucher_channel_listing =", "product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_price) expected_price", "line.price_override = price_override line.save(update_fields=[\"price_override\"]) checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info =", "== expected_unit_price * quantity assert prices_data.price_with_sale == expected_unit_price * quantity", "checkout_line.price_override = price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type =", "fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant #", "calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_voucher_amount = Money(Decimal(\"1\"),", "* quantity assert ( prices_data.price_with_discounts == (expected_unit_price - voucher_amount) *", "test_calculate_base_line_total_price_with_percentage_voucher( checkout_with_single_item, voucher, channel_USD ): # given quantity = 3", "assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale(", "given checkout_line = checkout_with_single_item.lines.first() price_override = Decimal(\"20.00\") checkout_line.price_override = price_override", "prices_data = calculate_base_line_unit_price( checkout_line_info, 
checkout_with_single_item.channel, discounts=[] ) # then expected_voucher_amount", "prices_data.price_with_sale == expected_unit_price * quantity assert ( prices_data.price_with_discounts == (expected_unit_price", "subtotal + shipping_price - discount # then assert total ==", "voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value = voucher_percent_value voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _", ") # then expected_undiscounted_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing,", "(expected_unit_price - expected_voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher,", "== expected_undiscounted_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price", "= fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not checkout_line_info.voucher variant =", "checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_line = checkout_with_single_item.lines.first()", "= Decimal(\"20.00\") line.price_override = price_override line.save(update_fields=[\"price_override\"]) checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)", ") assert prices_data.undiscounted_price == expected_price * quantity assert prices_data.price_with_sale ==", "= calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_price =", "Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[],", "given checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not", "collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_unit_price *", "== expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_custom_price( checkout_with_single_item, discount_info,", "= Money( price_override * voucher_percent_value / 100, checkout_with_single_item.currency ) assert", "= checkout_lines_info[0] assert checkout_line_info.voucher # when prices_data = calculate_base_line_unit_price( checkout_line_info,", "Decimal(\"20.00\") line.price_override = price_override line.save(update_fields=[\"price_override\"]) checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info", "\"USD\")) assert base_tax_rate(price) == Decimal(\"0.0\") def test_base_tax_rate_gross_price_zero(): price = TaxedMoney(net=Money(3,", "# then expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[],", "fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert 
checkout_line_info.voucher # when prices_data =", "price_override checkout_line.save(update_fields=[\"price_override\"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save()", "TaxedMoney(net=Money(0, currency), gross=Money(0, currency)) subtotal = TaxedMoney(net=Money(10, currency), gross=Money(12, currency))", "checkout_lines_info[0] assert not checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data", "# when prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) #", "channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_price * quantity assert", "assert prices_data.undiscounted_price == expected_price * quantity assert prices_data.price_with_sale == expected_price", "expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_custom_price(checkout_with_single_item): # given line", "currency) # when total = base_checkout_total(subtotal, shipping_price, discount, currency) expected", "): # given checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0]", "assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price - voucher_amount", "TaxedMoney(net=Money(0, \"USD\"), gross=Money(3, \"USD\")) assert base_tax_rate(price) == Decimal(\"0.0\") def test_base_tax_rate_gross_price_zero():", "gross=Money(12, currency)) shipping_price = zero_taxed_money discount = Money(20, currency) #", "on sale variant.product.category = category variant.product.save() checkout_line_info.product = variant.product #", "3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_line =", "# when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) #", "expected_price assert prices_data.price_with_discounts == expected_price - voucher_amount def test_calculate_base_line_unit_price_with_percentage_voucher( checkout_with_single_item,", "calculate_base_line_unit_price, ) from ..fetch import fetch_checkout_lines def test_calculate_base_line_unit_price(checkout_with_single_item): # given", "expected_price * quantity def test_calculate_base_line_total_price_with_fixed_voucher( checkout_with_single_item, voucher, channel_USD ): #", "test_calculate_base_line_total_price_with_fixed_voucher( checkout_with_single_item, voucher, channel_USD ): # given quantity = 3", "def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info, category, voucher, channel_USD ): # given", "assert base_tax_rate(price) == Decimal(\"0.0\") def test_base_checkout_total(): # given currency =", "# when total = base_checkout_total(subtotal, shipping_price, discount, currency) # then", "checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then currency = checkout_line_info.channel_listing.currency expected_price", "assert prices_data.price_with_discounts == expected_price - voucher_amount def test_calculate_base_line_unit_price_with_percentage_voucher( checkout_with_single_item, voucher,", "= checkout_line_info.variant # set category on sale variant.product.category = category", "= calculate_base_line_unit_price( 
checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then currency =", "test_calculate_base_line_unit_price_with_percentage_voucher_custom_prices( checkout_with_single_item, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first()", "currency) expected_voucher_amount = Money( price_override * voucher_percent_value / 100, checkout_with_single_item.currency", "voucher, channel_USD ): # given quantity = 3 checkout_line =", "= 3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_lines_info,", "expected_unit_price * quantity assert ( prices_data.price_with_discounts == (expected_unit_price * quantity)", "prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_custom_price( checkout_with_single_item, discount_info, category ): #", "expected_unit_price * quantity # apply once per order is applied", "category ): # given checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info =", "quantity ) def test_calculate_base_line_total_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD ): # given", "== expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_custom_price(checkout_with_single_item): # given", "assert prices_data.undiscounted_price == expected_unit_price * quantity assert prices_data.price_with_sale == expected_unit_price", "test_calculate_base_line_total_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info, category, voucher, channel_USD ): # given quantity", "Decimal(10) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value = voucher_percent_value voucher_channel_listing.save() checkout_with_single_item.voucher_code =", ") expected_price = sale_discount(expected_undiscounted_price) assert prices_data.undiscounted_price == expected_undiscounted_price assert prices_data.price_with_sale", "assert not checkout_line_info.voucher variant = checkout_line_info.variant # set category on", "expected_undiscounted_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price def", "= Money(Decimal(\"1\"), checkout_with_single_item.currency) expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing,", "== (expected_unit_price * quantity) - voucher_amount ) def test_base_tax_rate_net_price_zero(): price", "checkout_with_single_item.lines.first() price_override = Decimal(\"20.00\") line.price_override = price_override line.save(update_fields=[\"price_override\"]) checkout_lines_info, _", "import Decimal from prices import Money, TaxedMoney from ...discount import", "prices_data.price_with_discounts == expected_price * quantity def test_calculate_base_line_total_price_with_variant_on_sale( checkout_with_single_item, discount_info, category", "when total = base_checkout_total(subtotal, shipping_price, discount, currency) expected = subtotal", "currency), gross=Money(12, currency)) shipping_price = zero_taxed_money discount = Money(20, currency)", "prices_data.price_with_discounts == (expected_unit_price * quantity) - expected_voucher_amount ) def 
 ... remaining entries: shingled n-gram fragments of Saleor's checkout
 base-calculation test module. The strings are overlapping windows over a
 single file, so nearly every entry repeats its neighbours; deduplicated,
 they carry the following unique content.

 Imports (the `from decimal import Decimal` and `from prices import Money,
 TaxedMoney` forms are implied by usage, not visible in the fragments):
     from decimal import Decimal
     from prices import Money, TaxedMoney
     from ...discount import DiscountValueType, VoucherType
     from ...discount.utils import get_product_discount_on_sale
     from ..base_calculations import (
         base_checkout_total,
         base_tax_rate,
         calculate_base_line_total_price,
         calculate_base_line_unit_price,
     )
     from ..fetch import fetch_checkout_lines

 Test functions covered by the fragments:
     test_calculate_base_line_unit_price
     test_calculate_base_line_unit_price_with_custom_price
     test_calculate_base_line_unit_price_with_variant_on_sale
     test_calculate_base_line_unit_price_with_variant_on_sale_custom_price
     test_calculate_base_line_unit_price_with_variant_on_sale_and_voucher
     test_calculate_base_line_unit_price_with_fixed_voucher
     test_calculate_base_line_unit_price_with_fixed_voucher_custom_prices
     test_calculate_base_line_unit_price_with_percentage_voucher
     test_calculate_base_line_unit_price_with_percentage_voucher_custom_prices
     test_calculate_base_line_unit_price_with_discounts_apply_once_per_order
     test_calculate_base_line_total_price
     test_calculate_base_line_total_price_with_fixed_voucher
     test_calculate_base_line_total_price_with_percentage_voucher
     test_calculate_base_line_total_price_with_discounts_apply_once_per_order
     test_calculate_base_line_total_price_with_variant_on_sale
     test_calculate_base_line_total_price_with_variant_on_sale_and_voucher
     test_calculate_base_line_total_price_with_variant_on_sale_and_voucher_applied_once
     test_base_tax_rate_net_price_zero
     test_base_tax_rate_gross_price_zero
     test_base_checkout_total
     test_base_checkout_total_high_discount

 All line-price tests share one given/when/then shape: arrange a checkout
 through the checkout_with_single_item fixture (optionally attaching a sale
 via discount_info and category, a fixed or percentage voucher via voucher
 and channel_USD, or a line-level price_override saved with
 line.save(update_fields=["price_override"])), call
 calculate_base_line_unit_price or calculate_base_line_total_price with the
 checkout line info, the channel and the discounts, then assert on the
 returned prices_data fields undiscounted_price, price_with_sale and
 price_with_discounts. A sketch of the simplest pair of tests follows. ]
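To show the shape these fragments reconstruct to, here is a self-contained
sketch of the two base_checkout_total tests. The test bodies are assembled
from the fragments; the base_checkout_total stand-in and the final assertion
of the high-discount test are assumptions (the real function lives in
Saleor's checkout base_calculations module, and the fragments cut off before
that test's `then` block):

from prices import Money, TaxedMoney  # the prices package used by Saleor


def base_checkout_total(subtotal, shipping_price, discount, currency):
    # Stand-in with the behavior the tests exercise (assumption): total is
    # subtotal + shipping - discount, clamped at zero so an oversized
    # discount can never produce a negative checkout total.
    zero = TaxedMoney(net=Money(0, currency), gross=Money(0, currency))
    return max(subtotal + shipping_price - discount, zero)


def test_base_checkout_total():
    # given
    currency = "USD"
    taxed_money = TaxedMoney(net=Money(10, currency), gross=Money(10, currency))
    subtotal = taxed_money
    shipping_price = taxed_money
    discount = Money(5, currency)

    # when
    total = base_checkout_total(subtotal, shipping_price, discount, currency)

    # then
    expected = subtotal + shipping_price - discount
    assert total == expected


def test_base_checkout_total_high_discount():
    # given
    currency = "USD"
    zero_taxed_money = TaxedMoney(net=Money(0, currency), gross=Money(0, currency))
    subtotal = TaxedMoney(net=Money(10, currency), gross=Money(12, currency))
    shipping_price = zero_taxed_money
    discount = Money(20, currency)

    # when
    total = base_checkout_total(subtotal, shipping_price, discount, currency)

    # then (assumed assertion; the fragments end at the `when` step)
    assert total == zero_taxed_money

Under the stand-in both tests pass with only the prices package installed;
against Saleor itself the import would instead be the relative
`from ..base_calculations import base_checkout_total` recovered above.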
[ shingled n-gram fragments of the ged4py date test module ("Tests for
 `ged4py.date` module", UTF-8 coded, built on unittest). As above, the
 strings are overlapping windows over one file; their unique content is the
 following.

 Imports:
     from ged4py.date: DateValue, DateValueAbout, DateValueAfter,
         DateValueBefore, DateValueCalculated, DateValueEstimated,
         DateValueFrom, DateValueInterpreted, DateValuePeriod,
         DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo,
         DateValueTypes, DateValueVisitor
     from a calendar module whose path is cut off in the fragments:
         CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate,
         JulianDate, CalendarDateVisitor

 class TestDateVisitor(CalendarDateVisitor, DateValueVisitor): each
 visitGregorian/visitJulian/visitHebrew/visitFrench and
 visitSimple/visitPeriod/visitFrom/visitTo/visitRange/visitBefore/
 visitAfter/visitAbout/visitCalculated/visitEstimated/visitInterpreted/
 visitPhrase method type-checks its argument, raising
 TypeError(str(type(date))) on a mismatch, and returns a tagged tuple such
 as ("gregorian", date), ("range", date.date1, date.date2) or
 ("phrase", date.phrase).

 class TestDetailDate(unittest.TestCase), methods named in the fragments
 (gaps in the numbering fall outside the captured windows):
     test_001_cal_date - construction and attributes of GregorianDate,
         HebrewDate, FrenchDate, JulianDate (year, year_str, bc, month,
         month_num, day, calendar, dual_year)
     an unnamed key() test - CalendarDate.key() yields (Julian day number,
         flag) tuples, e.g. GregorianDate(2017, "OCT", 9).key() ==
         (2458035.5, 0)
     test_003_cal_date_cmp - ordering within and across calendars; a date
         missing its day compares as "past" the last day of that month but
         before the next month, a date missing its month likewise for the
         year; dual dates such as GregorianDate(1699, "JAN", 1,
         dual_year=1700) equal GregorianDate(1700, "JAN", 1)
     test_004_cal_date_str - str() forms: "9 OCT 2017", "OCT 2017 B.C.",
         "1 JAN 1699/00", "@#DHEBREW@ 5000", "@#DFRENCH R@ 1 VEND 1",
         "@#DJULIAN@ 5 OCT 1582"
     test_005_cal_date_parse - CalendarDate.parse() for all four calendar
         escapes, B.C. years and dual years; ValueError for
         "@#DROMAN@ 2020", "@#DUNKNOWN@ 2020", "@#DJULIAN@ 2020/21"
         (dual year only works for GREGORIAN) and "start of time"
     test_006_cal_date_visitor / test_019_date_value_visitor - accept()
         dispatches to the visitor and returns its tagged tuple
     test_007_cal_date_hash / test_020_date_hash - equal dates and equal
         DateValues hash equal
     test_010_date_no_date, test_012_date_parse_period,
     test_014_date_parse_approx (ABT/CAL/EST), test_015_date_parse_phrase,
     test_016_date_parse_simple, test_017_date_cmp,
     test_018_date_parse_empty - DateValue.parse() of periods (FROM/TO),
         ranges (BET ... AND ...), approximate, interpreted
         (INT ... with a phrase), simple, phrase-only and empty values,
         each checked for kind (DateValueTypes), payload dates and str() ]
CalendarDate.parse(\"@#DUNKNOWN@ 2020\") # dual year only works for", "\"JAN\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 1) != GregorianDate(2017, \"JAN\", 2)) #", "`ged4py.date` module.\"\"\" import unittest from ged4py.calendar import ( CalendarType, CalendarDate,", "1)) # dual date self.assertTrue(GregorianDate(1700, \"JAN\", 1) == GregorianDate(1699, \"JAN\",", "date.accept(visitor) self.assertEqual(value, (\"hebrew\", date)) date = FrenchDate(1, \"VEND\", 1) value", "JAN 2000\")) self.assertTrue(DateValue.parse(\"1 JAN 2000\") > DateValue.parse(\"TO 1 JAN 2000\"))", "date): if not isinstance(date, DateValueInterpreted): raise TypeError(str(type(date))) return (\"interpreted\", date.date,", "self.assertEqual(date.dual_year, 1700) self.assertEqual(date.original, \"10 MAR 1699/00\") self.assertEqual(date.calendar, CalendarType.GREGORIAN) date =", "DateValue.parse(\"2000\")) def test_018_date_parse_empty(self): \"\"\"Test date.DateValue class.\"\"\" for value in (None,", "self.assertEqual(date.year, 5) self.assertTrue(date.bc) self.assertEqual(date.year_str, \"5 B.C.\") self.assertEqual(date.month, \"JAN\") self.assertEqual(date.month_num, 1)", "date.DateValue class.\"\"\" for value in (None, \"\"): date = DateValue.parse(value)", "CalendarType.GREGORIAN) date = GregorianDate(1699, \"FEB\", dual_year=1700) self.assertEqual(date.year, 1699) self.assertEqual(date.dual_year, 1700)", "\"INTERPRETED 1 JAN 2017 (some phrase)\") def test_016_date_parse_simple(self): \"\"\"Test date.DateValue", "date = DateValue.parse(\"1967 B.C.\") self.assertIsInstance(date, DateValueSimple) self.assertEqual(date.kind, DateValueTypes.SIMPLE) self.assertEqual(date.date, GregorianDate(1967,", "klass, typeEnum in approx: for datestr, value in dates.items(): date", "date = JulianDate(5, \"JAN\", bc=True) self.assertEqual(date.year, 5) self.assertTrue(date.bc) self.assertEqual(date.year_str, \"5", "self.assertEqual(date.kind, DateValueTypes.PHRASE) self.assertEqual(date.phrase, \"not a date\") self.assertEqual(str(date), \"(not a date)\")", "2000 AND 1 JAN 2001\") dv2 = DateValue.parse(\"BET 31 DEC", "self.assertEqual(str(date), \"@#DJULIAN@ 5 OCT 1582\") def test_005_cal_date_parse(self): \"\"\"Test date.CalendarDate.parse method.\"\"\"", "and apr 2000\") self.assertIsInstance(date, DateValueRange) self.assertEqual(date.kind, DateValueTypes.RANGE) self.assertEqual(date.date1, GregorianDate(1920, \"MAR\"))", "22) self.assertEqual(date.key(), (1084542.5, 0)) date = JulianDate(1000) self.assertEqual(date.key(), (2086672.5, 1))", "= CalendarDate.parse(\"31 MAY 2020\") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 2020) self.assertIsNone(date.dual_year) self.assertFalse(date.bc)", "(some phrase)\") def test_016_date_parse_simple(self): \"\"\"Test date.DateValue class.\"\"\" date = DateValue.parse(\"1967", "CalendarType.HEBREW) # cannot handle ROMAN with self.assertRaises(ValueError): date = CalendarDate.parse(\"@#DROMAN@", "2000\") > DateValue.parse(\"BET 1 JAN 1999 AND 1 JAN 2000\"))", "bc=True) self.assertEqual(str(date), \"OCT 2017 B.C.\") date = GregorianDate(1699, \"JAN\", 1,", "date = GregorianDate(2017, \"OCT\", 9) value = date.accept(visitor) self.assertEqual(value, (\"gregorian\",", "value = DateValueRange(date1, date2).accept(visitor) self.assertEqual(value, (\"range\", date1, date2)) value =", "1 JAN 2001\")) self.assertTrue(DateValue.parse(\"FROM 1 JAN 2000 TO 1 JAN", "< DateValue.parse(\"2017\")) self.assertTrue(DateValue.parse(\"2 JAN 2016\") > DateValue.parse(\"1 JAN 
2016\")) self.assertTrue(DateValue.parse(\"BET", "MAR 1698/99\") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 1698) self.assertEqual(date.dual_year, 1699) self.assertFalse(date.bc) self.assertEqual(date.month,", "DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor", "date.date2) def visitBefore(self, date): if not isinstance(date, DateValueBefore): raise TypeError(str(type(date)))", "JulianDate) self.assertEqual(date.year, 100) self.assertTrue(date.bc) self.assertIsNone(date.month) self.assertIsNone(date.month_num) self.assertIsNone(date.day) self.assertEqual(date.original, \"@#DJULIAN@ 100", "# dual year only works for GREGORIAN with self.assertRaises(ValueError): date", "= CalendarDate.parse(\"10 MAR 1699/00\") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 1699) self.assertEqual(date.dual_year, 1700)", "\"FEB\", 1)) # missing month compares as \"past\" the last", "(\"phrase\", date.phrase) class TestDetailDate(unittest.TestCase): \"\"\"Tests for `ged4py.date` module.\"\"\" def test_001_cal_date(self):", "class.\"\"\" visitor = TestDateVisitor() date1 = GregorianDate(2017, \"JAN\", 1) date2", "5) self.assertTrue(date.bc) self.assertEqual(date.year_str, \"5 B.C.\") self.assertEqual(date.month, \"JAN\") self.assertEqual(date.month_num, 1) self.assertIsNone(date.day)", "self.assertIsInstance(date, DateValuePhrase) self.assertEqual(date.kind, DateValueTypes.PHRASE) self.assertIsNone(date.phrase) self.assertEqual(str(date), \"\") def test_019_date_value_visitor(self): \"\"\"Test", "== FrenchDate(228, \"COMP\", 5)) # compare Gregorian and Hebrew dates", "DateValueRange) self.assertEqual(date.kind, DateValueTypes.RANGE) self.assertEqual(date.date1, GregorianDate(1920, \"MAR\")) self.assertEqual(date.date2, GregorianDate(2000, \"APR\")) self.assertEqual(str(date),", "(\"french\", date)) date = JulianDate(1582, \"OCT\", 5) value = date.accept(visitor)", "2)) self.assertTrue(GregorianDate(2017, \"JAN\", 1) <= GregorianDate(2017, \"JAN\", 2)) self.assertTrue(GregorianDate(2017, \"JAN\",", "dv = DateValue.parse(\"31 DEC 2000\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2000, \"DEC\",", "HebrewDate(2017, \"TSH\", 22) self.assertEqual(date.key(), (1084542.5, 0)) date = JulianDate(1000) self.assertEqual(date.key(),", "coding: utf-8 -*- \"\"\"Tests for `ged4py.date` module.\"\"\" import unittest from", "= CalendarDate.parse(\"@#DJULIAN@ 100 B.C.\") self.assertIsInstance(date, JulianDate) self.assertEqual(date.year, 100) self.assertTrue(date.bc) self.assertIsNone(date.month)", "\"SEP\", 22) == FrenchDate(1, \"VEND\", 1)) self.assertTrue(GregorianDate(1792, \"SEP\", 23) >", "> DateValue.parse(\"1 JAN 2016\")) self.assertTrue(DateValue.parse(\"BET 1900 AND 2000\") < DateValue.parse(\"FROM", "phrase)\") self.assertIsInstance(date, DateValuePhrase) self.assertEqual(date.kind, DateValueTypes.PHRASE) self.assertEqual(date.phrase, \"some phrase\") date =", "value = DateValueFrom(date1).accept(visitor) self.assertEqual(value, (\"from\", date1)) value = DateValueTo(date1).accept(visitor) self.assertEqual(value,", "self.assertTrue(GregorianDate(1700, \"JAN\", 1) == GregorianDate(1699, \"JAN\", 1, dual_year=1700)) # compare", "self.assertEqual(date.date, GregorianDate(1967, bc=True)) self.assertEqual(date.phrase, \"some phrase\") self.assertEqual(str(date), \"INTERPRETED 1967 B.C.", "(GregorianDate(2000, 
\"DEC\", 31), GregorianDate(2001, \"JAN\", 1))) # order of dates", "date): if not isinstance(date, DateValueCalculated): raise TypeError(str(type(date))) return (\"calculated\", date.date)", "JAN 2000\"), DateValue.parse(\"BET 1 JAN 2000 AND 1 JAN 2001\"))", "GregorianDate(2017, \"OCT\", 9) value = date.accept(visitor) self.assertEqual(value, (\"gregorian\", date)) date", "test_020_date_hash(self): \"\"\"Test date.Date hash\"\"\" dv1 = DateValue.parse(\"2016\") dv2 = DateValue.parse(\"2016\")", "date.CalendarDate.parse method.\"\"\" date = CalendarDate.parse(\"31 MAY 2020\") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year,", "self.assertTrue(GregorianDate(2017, \"JAN\", 2) >= GregorianDate(2017, \"JAN\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 1)", "(\"from\", date.date) def visitTo(self, date): if not isinstance(date, DateValueTo): raise", "= DateValue.parse(value) self.assertIsInstance(date, DateValuePhrase) self.assertEqual(date.kind, DateValueTypes.PHRASE) self.assertIsNone(date.phrase) self.assertEqual(str(date), \"\") def", "[ (\"ABT\", \"ABOUT\", DateValueAbout, DateValueTypes.ABOUT), (\"CAL\", \"CALCULATED\", DateValueCalculated, DateValueTypes.CALCULATED), (\"EST\",", "JulianDate): raise TypeError(str(type(date))) return (\"julian\", date) def visitHebrew(self, date): if", "\"MAR\")) self.assertEqual(date.date2, GregorianDate(2000, \"APR\")) self.assertEqual(str(date), \"BETWEEN MAR 1920 AND APR", "== JulianDate(2000, \"JAN\", 1)) # compare Gregorian and French dates", "\"GERM\") self.assertEqual(date.month_num, 7) self.assertEqual(date.day, 15) self.assertEqual(date.original, \"@#DFRENCH R@ 15 GERM", "self.assertTrue(date.bc) self.assertIsNone(date.month) self.assertIsNone(date.month_num) self.assertIsNone(date.day) self.assertEqual(date.original, \"@#DJULIAN@ 100 B.C.\") self.assertEqual(date.calendar, CalendarType.JULIAN)", "= DateValueBefore(date1).accept(visitor) self.assertEqual(value, (\"before\", date1)) value = DateValueAfter(date1).accept(visitor) self.assertEqual(value, (\"after\",", "= date.accept(visitor) self.assertEqual(value, (\"hebrew\", date)) date = FrenchDate(1, \"VEND\", 1)", "1582\") def test_005_cal_date_parse(self): \"\"\"Test date.CalendarDate.parse method.\"\"\" date = CalendarDate.parse(\"31 MAY", "\"DEC\", 31), GregorianDate(2001, \"JAN\", 1))) # order of dates is", "self.assertEqual(date.year, 1698) self.assertEqual(date.dual_year, 1699) self.assertFalse(date.bc) self.assertEqual(date.month, \"MAR\") self.assertEqual(date.month_num, 3) self.assertEqual(date.day,", "self.assertEqual(value, (\"after\", date1)) value = DateValueRange(date1, date2).accept(visitor) self.assertEqual(value, (\"range\", date1,", "self.assertEqual(date.kind, typeEnum) self.assertEqual(str(date), fmt + \" \" + datestr) self.assertEqual(date.date,", ">= GregorianDate(2017, \"JAN\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 1) == GregorianDate(2017, \"JAN\",", "date = DateValue.parse(\"@#DGREGORIAN@ 1 JAN 2017\") self.assertIsInstance(date, DateValueSimple) self.assertEqual(date.kind, DateValueTypes.SIMPLE)", "self.assertIsNone(date.month_num) self.assertIsNone(date.day) self.assertEqual(date.original, \"@#DJULIAN@ 100 B.C.\") self.assertEqual(date.calendar, CalendarType.JULIAN) date =", "class.\"\"\" date = DateValue.parse(\"BEF 1967B.C.\") self.assertIsInstance(date, DateValueBefore) self.assertEqual(date.kind, DateValueTypes.BEFORE) self.assertEqual(date.date,", "self.assertEqual(date.kind, DateValueTypes.SIMPLE) self.assertEqual(date.date, 
GregorianDate(2017, \"JAN\", 1)) self.assertEqual(str(date), \"1 JAN 2017\")", "1) == GregorianDate(2017, \"JAN\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 1) != GregorianDate(2017,", "JAN 2000 AND 1 JAN 2001\")) self.assertTrue(DateValue.parse(\"FROM 1 JAN 1999", "compare Gregorian and Hebrew dates self.assertTrue(GregorianDate(2020, \"JAN\", 1) == HebrewDate(5780,", "date = CalendarDate.parse(\"@#DJULIAN@ 2020/21\") # cannot parse nonsense with self.assertRaises(ValueError):", "self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2016), GregorianDate(2016))) dv = DateValue.parse(\"31 DEC 2000\")", "1 JAN 2001\"), DateValue.parse(\"BET 1 JAN 2000 AND 1 JAN", "value = DateValueBefore(date1).accept(visitor) self.assertEqual(value, (\"before\", date1)) value = DateValueAfter(date1).accept(visitor) self.assertEqual(value,", "2) > GregorianDate(1792, \"SEP\", 22)) self.assertTrue(GregorianDate(2020, \"SEP\", 21) == FrenchDate(228,", "15) self.assertEqual(date.original, \"@#DFRENCH R@ 15 GERM 0001\") self.assertEqual(date.calendar, CalendarType.FRENCH_R) date", "(\"to\", date.date) def visitRange(self, date): if not isinstance(date, DateValueRange): raise", "2) > GregorianDate(2017, \"JAN\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 2) >= GregorianDate(2017,", "value = DateValueEstimated(date1).accept(visitor) self.assertEqual(value, (\"estimated\", date1)) value = DateValueInterpreted(date1, \"phrase\").accept(visitor)", "(\"simple\", date.date) def visitPeriod(self, date): if not isinstance(date, DateValuePeriod): raise", "GregorianDate(1920)) self.assertEqual(date.date2, GregorianDate(2000)) self.assertEqual(str(date), \"FROM 1920 TO 2000\") date =", "TestDetailDate(unittest.TestCase): \"\"\"Tests for `ged4py.date` module.\"\"\" def test_001_cal_date(self): \"\"\"Test date.CalendarDate class.\"\"\"", "31 DEC 2000 AND 1 JAN 2000\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(),", "value = DateValuePhrase(\"phrase\").accept(visitor) self.assertEqual(value, (\"phrase\", \"phrase\")) def test_020_date_hash(self): \"\"\"Test date.Date", "date)) def test_007_cal_date_hash(self): \"\"\"Test date.CalendarDate hash.\"\"\" self.assertEqual(hash(GregorianDate(2017, \"OCT\", 9)), hash(GregorianDate(2017,", "(some phrase)\") date = DateValue.parse(\"INT @#DGREGORIAN@ 1 JAN 2017 (some", "10) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = GregorianDate(1699, \"FEB\", dual_year=1700) self.assertEqual(date.year,", "2000 AND 1 JAN 2000\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2000, \"DEC\",", "self.assertEqual(date.dual_year, 1699) self.assertFalse(date.bc) self.assertEqual(date.month, \"MAR\") self.assertEqual(date.month_num, 3) self.assertEqual(date.day, 10) self.assertEqual(date.original,", "\"MAY\") self.assertEqual(date.month_num, 5) self.assertEqual(date.day, 31) self.assertEqual(date.original, \"31 MAY 2020\") self.assertEqual(date.calendar,", "\"DEC\", 31)) self.assertTrue(GregorianDate(2017) < GregorianDate(2018, \"JAN\", 1)) # dual date", "2017\") self.assertIsInstance(date, DateValueSimple) self.assertEqual(date.kind, DateValueTypes.SIMPLE) self.assertEqual(date.date, GregorianDate(2017, \"JAN\", 1)) self.assertEqual(str(date),", "not isinstance(date, DateValueCalculated): raise TypeError(str(type(date))) return (\"calculated\", date.date) def visitEstimated(self,", "date) def visitSimple(self, date): if not isinstance(date, DateValueSimple): raise 
TypeError(str(type(date)))", "TypeError(str(type(date))) return (\"about\", date.date) def visitCalculated(self, date): if not isinstance(date,", "date.DateValue class.\"\"\" visitor = TestDateVisitor() date1 = GregorianDate(2017, \"JAN\", 1)", "date1)) value = DateValueEstimated(date1).accept(visitor) self.assertEqual(value, (\"estimated\", date1)) value = DateValueInterpreted(date1,", "FrenchDate): raise TypeError(str(type(date))) return (\"french\", date) def visitSimple(self, date): if", "DateValueAbout(date1).accept(visitor) self.assertEqual(value, (\"about\", date1)) value = DateValueCalculated(date1).accept(visitor) self.assertEqual(value, (\"calculated\", date1))", "day of month, but before next month self.assertTrue(GregorianDate(2017, \"JAN\") >", "2000\")) self.assertNotEqual(DateValue.parse(\"1 JAN 2000\"), DateValue.parse(\"BET 1 JAN 2000 AND 1", "CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor ) from ged4py.date import", "\"VEND\", 1) value = date.accept(visitor) self.assertEqual(value, (\"french\", date)) date =", "AND 1 JAN 2001\")) self.assertTrue(DateValue.parse(\"FROM 1 JAN 2000 TO 1", "method.\"\"\" visitor = TestDateVisitor() date = GregorianDate(2017, \"OCT\", 9) value", "raise TypeError(str(type(date))) return (\"after\", date.date) def visitAbout(self, date): if not", "31 DEC 2000 AND 1 JAN 2001\") dv2 = DateValue.parse(\"BET", "for appr, fmt, klass, typeEnum in approx: for datestr, value", "python # -*- coding: utf-8 -*- \"\"\"Tests for `ged4py.date` module.\"\"\"", "GregorianDate(1699, \"FEB\", 1, dual_year=1700) self.assertEqual(date.key(), (2342003.5, 0)) date = FrenchDate(2017,", "TO 2000\") self.assertIsInstance(date, DateValuePeriod) self.assertEqual(date.kind, DateValueTypes.PERIOD) self.assertEqual(date.date1, GregorianDate(1920)) self.assertEqual(date.date2, GregorianDate(2000))", "OCT 2017\") date = GregorianDate(2017, \"OCT\", bc=True) self.assertEqual(str(date), \"OCT 2017", "2000\") < DateValue.parse(\"FROM 1920 TO 1999\")) # comparing simple date", "= DateValue.parse(\"@#DGREGORIAN@ 1 JAN 2017\") self.assertIsInstance(date, DateValueSimple) self.assertEqual(date.kind, DateValueTypes.SIMPLE) self.assertEqual(date.date,", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\"Tests for `ged4py.date`", "\"BETWEEN @#DJULIAN@ 1600 AND 2000\") date = DateValue.parse(\"bet mar 1920", "JAN 2017\") date = DateValue.parse(\"BET @#DJULIAN@ 1600 AND 2000\") self.assertIsInstance(date,", "+ \" \" + datestr) self.assertEqual(date.date, value) def test_015_date_parse_phrase(self): \"\"\"Test", "1 APR 2000\") def test_013_date_parse_range(self): \"\"\"Test date.DateValue class.\"\"\" date =", "\"empty\" date is always later than any regular date self.assertTrue(DateValue.parse(\"\")", "AND 1 JAN 2001\")) self.assertTrue(DateValue.parse(\"1 JAN 2000\") < DateValue.parse(\"BET 1", "value = date.accept(visitor) self.assertEqual(value, (\"julian\", date)) def test_007_cal_date_hash(self): \"\"\"Test date.CalendarDate", "CalendarDate.parse(\"@#DFRENCH R@ 15 GERM 0001\") self.assertIsInstance(date, FrenchDate) self.assertEqual(date.year, 1) self.assertFalse(date.bc)", "DEC 2000 AND 1 JAN 2001\") dv2 = DateValue.parse(\"BET 31", "and Hebrew dates self.assertTrue(GregorianDate(2020, \"JAN\", 1) == HebrewDate(5780, \"SVN\", 4))", "2017 B.C.\") date = GregorianDate(1699, \"JAN\", 1, dual_year=1700) self.assertEqual(str(date), \"1", "visitBefore(self, date): if not isinstance(date, DateValueBefore): raise TypeError(str(type(date))) return 
(\"before\",", "self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = HebrewDate(5000) self.assertEqual(date.year, 5000) self.assertFalse(date.bc) self.assertEqual(date.year_str, \"5000\")", "3) self.assertEqual(date.day, 10) self.assertEqual(date.original, \"@#DGREGORIAN@ 10 MAR 1698/99\") self.assertEqual(date.calendar, CalendarType.GREGORIAN)", "class.\"\"\" date = DateValue.parse(\"not a date\") self.assertIsInstance(date, DateValuePhrase) self.assertEqual(date.kind, DateValueTypes.PHRASE)", "self.assertTrue(DateValue.parse(\"1 JAN 2000\") > DateValue.parse(\"BET 1 JAN 1999 AND 1", "in approx: for datestr, value in dates.items(): date = DateValue.parse(appr", "self.assertEqual(date.month_num, 5) self.assertEqual(date.day, 31) self.assertEqual(date.original, \"31 MAY 2020\") self.assertEqual(date.calendar, CalendarType.GREGORIAN)", "1) date2 = GregorianDate(2017, \"DEC\", 31) value = DateValueSimple(date1).accept(visitor) self.assertEqual(value,", "DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor ) class", "JAN 2017\") def test_017_date_cmp(self): \"\"\"Test date.Date class.\"\"\" dv = DateValue.parse(\"2016\")", "date = FrenchDate(2017, \"VENT\", bc=True) self.assertEqual(date.key(), (1638959.5, 1)) date =", "self.assertIsInstance(date, DateValueSimple) self.assertEqual(date.kind, DateValueTypes.SIMPLE) self.assertEqual(date.date, GregorianDate(1967, bc=True)) self.assertEqual(str(date), \"1967 B.C.\")", "raise TypeError(str(type(date))) return (\"range\", date.date1, date.date2) def visitBefore(self, date): if", "date1)) value = DateValueCalculated(date1).accept(visitor) self.assertEqual(value, (\"calculated\", date1)) value = DateValueEstimated(date1).accept(visitor)", "self.assertEqual(date.month, \"FRUC\") self.assertEqual(date.month_num, 12) self.assertEqual(date.day, 1) self.assertEqual(date.calendar, CalendarType.FRENCH_R) date =", "\"JAN\", 1)) # dual date self.assertTrue(GregorianDate(1700, \"JAN\", 1) == GregorianDate(1699,", "self.assertEqual(date.calendar, CalendarType.FRENCH_R) date = CalendarDate.parse(\"@#DHEBREW@ 7 NSN 5000\") self.assertIsInstance(date, HebrewDate)", "DateValueTo) self.assertEqual(date.kind, DateValueTypes.TO) self.assertEqual(date.date, GregorianDate(2017, \"JAN\", 1)) self.assertEqual(str(date), \"TO 1", "GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor ) from ged4py.date import ( DateValue,", "MAY 2020\") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 2020) self.assertIsNone(date.dual_year) self.assertFalse(date.bc) self.assertEqual(date.month, \"MAY\")", "21) == FrenchDate(228, \"COMP\", 5)) # compare Gregorian and Hebrew", "\"JAN\", 2)) self.assertTrue(GregorianDate(2017, \"JAN\", 2) > GregorianDate(2017, \"JAN\", 1)) self.assertTrue(GregorianDate(2017,", "DateValue.parse(\"BET 31 DEC 2000 AND 1 JAN 2001\") dv2 =", "\"APR\", 1)) self.assertEqual(str(date), \"FROM MAR 1920 TO 1 APR 2000\")", "1698/99\") self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = CalendarDate.parse(\"10 MAR 1699/00\") self.assertIsInstance(date, GregorianDate)", "date1)) value = DateValueTo(date1).accept(visitor) self.assertEqual(value, (\"to\", date1)) value = DateValuePeriod(date1,", "method.\"\"\" date = CalendarDate.parse(\"31 MAY 2020\") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 2020)", "5)) self.assertTrue(JulianDate(1582, \"OCT\", 6) > GregorianDate(1582, \"OCT\", 15)) 
self.assertTrue(GregorianDate(2000, \"JAN\",", "date2)) value = DateValueBefore(date1).accept(visitor) self.assertEqual(value, (\"before\", date1)) value = DateValueAfter(date1).accept(visitor)", "self.assertEqual(str(date), \"1 JAN 2017\") def test_017_date_cmp(self): \"\"\"Test date.Date class.\"\"\" dv", "(some phrase)\") self.assertIsInstance(date, DateValueInterpreted) self.assertEqual(date.kind, DateValueTypes.INTERPRETED) self.assertEqual(date.date, GregorianDate(2017, \"JAN\", 1))", "simple date with range self.assertTrue(DateValue.parse(\"1 JAN 2000\") > DateValue.parse(\"BET 1", "tuple) self.assertEqual(dv.key(), (GregorianDate(2000, \"DEC\", 31), GregorianDate(2000, \"DEC\", 31))) dv =", "self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 1698) self.assertEqual(date.dual_year, 1699) self.assertFalse(date.bc) self.assertEqual(date.month, \"MAR\") self.assertEqual(date.month_num,", "\"JAN\", 2)) self.assertTrue(GregorianDate(2017, \"JAN\", 1) <= GregorianDate(2017, \"JAN\", 2)) self.assertTrue(GregorianDate(2017,", "< GregorianDate(2017, \"JAN\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 1) < GregorianDate(2017, \"FEB\",", "2000\") > DateValue.parse(\"TO 1 JAN 2000\")) self.assertTrue(DateValue.parse(\"1 JAN 2000\") <", "15 GERM 0001\") self.assertEqual(date.calendar, CalendarType.FRENCH_R) date = CalendarDate.parse(\"@#DHEBREW@ 7 NSN", "self.assertEqual(str(date), \"9 OCT 2017\") date = GregorianDate(2017, \"OCT\", bc=True) self.assertEqual(str(date),", "DateValueTypes.RANGE) self.assertEqual(date.date1, JulianDate(1600)) self.assertEqual(date.date2, GregorianDate(2000)) self.assertEqual(str(date), \"BETWEEN @#DJULIAN@ 1600 AND", "= DateValue.parse(\"2016\") dv2 = DateValue.parse(\"2016\") self.assertEqual(hash(dv1), hash(dv2)) dv1 = DateValue.parse(\"31", "(\"from\", date1)) value = DateValueTo(date1).accept(visitor) self.assertEqual(value, (\"to\", date1)) value =", "GregorianDate(1699, \"FEB\", dual_year=1700) self.assertEqual(date.year, 1699) self.assertEqual(date.dual_year, 1700) self.assertFalse(date.bc) self.assertEqual(date.year_str, \"1699/00\")", "\"JAN\", 1) <= GregorianDate(2017, \"JAN\", 2)) self.assertTrue(GregorianDate(2017, \"JAN\", 2) >", "date = JulianDate(1582, \"OCT\", 5) self.assertEqual(str(date), \"@#DJULIAN@ 5 OCT 1582\")", "if not isinstance(date, DateValueTo): raise TypeError(str(type(date))) return (\"to\", date.date) def", "DateValue.parse(\"BET 31 DEC 2000 AND 1 JAN 2000\") self.assertIsInstance(dv.key(), tuple)", "1 JAN 2002\") > DateValue.parse(\"BET 1 JAN 2000 AND 1", "\"JAN\", 2) > GregorianDate(2017, \"JAN\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 2) >=", "module.\"\"\" import unittest from ged4py.calendar import ( CalendarType, CalendarDate, FrenchDate,", "B.C.\") self.assertEqual(date.calendar, CalendarType.JULIAN) date = CalendarDate.parse(\"@#DFRENCH R@ 15 GERM 0001\")", "\"JAN\", 1, dual_year=1700)) # compare Gregorian and Julian dates self.assertTrue(GregorianDate(1582,", "not isinstance(date, DateValueRange): raise TypeError(str(type(date))) return (\"range\", date.date1, date.date2) def", "1)), hash(FrenchDate(1, \"VEND\", 1))) self.assertEqual(hash(FrenchDate(1)), hash(FrenchDate(1))) def test_010_date_no_date(self): \"\"\"Test date.DateValue", "DateValueTypes.RANGE) self.assertEqual(date.date1, GregorianDate(1920, \"MAR\")) self.assertEqual(date.date2, GregorianDate(2000, \"APR\")) self.assertEqual(str(date), \"BETWEEN MAR", "JAN 2000\")) # comparing ranges self.assertEqual(DateValue.parse(\"FROM 1 
JAN 2000 TO", "\"JAN 2017\": GregorianDate(2017, \"JAN\"), \"31 JAN 2017\": GregorianDate(2017, \"JAN\", 31)}", "up dv = DateValue.parse(\"BET 31 DEC 2000 AND 1 JAN", "self.assertEqual(hash(FrenchDate(1, \"VEND\", 1)), hash(FrenchDate(1, \"VEND\", 1))) self.assertEqual(hash(FrenchDate(1)), hash(FrenchDate(1))) def test_010_date_no_date(self):", "DateValue.parse(\"2000\")) # \"empty\" date is always later than any regular", "2000\") self.assertIsInstance(date, DateValueRange) self.assertEqual(date.kind, DateValueTypes.RANGE) self.assertEqual(date.date1, JulianDate(1600)) self.assertEqual(date.date2, GregorianDate(2000)) self.assertEqual(str(date),", "NSN 5000\") self.assertEqual(date.calendar, CalendarType.HEBREW) # cannot handle ROMAN with self.assertRaises(ValueError):", "regular date self.assertTrue(DateValue.parse(\"\") > DateValue.parse(\"2000\")) def test_018_date_parse_empty(self): \"\"\"Test date.DateValue class.\"\"\"", "last day of month, but before next month self.assertTrue(GregorianDate(2017, \"JAN\")", "phrase\") self.assertEqual(str(date), \"INTERPRETED 1 JAN 2017 (some phrase)\") def test_016_date_parse_simple(self):", "\"JAN\", 2)) # missing day compares as \"past\" the last", "date = DateValue.parse(\"AFT 1 JAN 2017\") self.assertIsInstance(date, DateValueAfter) self.assertEqual(date.kind, DateValueTypes.AFTER)", "7) self.assertEqual(date.original, \"@#DHEBREW@ 7 NSN 5000\") self.assertEqual(date.calendar, CalendarType.HEBREW) # cannot", "\"FRUC\") self.assertEqual(date.month_num, 12) self.assertEqual(date.day, 1) self.assertEqual(date.calendar, CalendarType.FRENCH_R) date = JulianDate(5,", "DateValue.parse(\"BET 1 JAN 2000 AND 1 JAN 2001\")) self.assertTrue(DateValue.parse(\"1 JAN", "self.assertIsInstance(date, klass) self.assertEqual(date.kind, typeEnum) self.assertEqual(str(date), fmt + \" \" +", "self.assertEqual(date.date, GregorianDate(2017, \"JAN\", 1)) self.assertEqual(str(date), \"AFTER 1 JAN 2017\") date", "visitHebrew(self, date): if not isinstance(date, HebrewDate): raise TypeError(str(type(date))) return (\"hebrew\",", "DateValueEstimated(date1).accept(visitor) self.assertEqual(value, (\"estimated\", date1)) value = DateValueInterpreted(date1, \"phrase\").accept(visitor) self.assertEqual(value, (\"interpreted\",", "self.assertEqual(value, (\"period\", date1, date2)) value = DateValueBefore(date1).accept(visitor) self.assertEqual(value, (\"before\", date1))", "# order of dates is messed up dv = DateValue.parse(\"BET", "self.assertEqual(date.month, \"JAN\") self.assertEqual(date.month_num, 1) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.JULIAN) def test_002_cal_date_key(self): \"\"\"Test", "\"JAN\", 14) == JulianDate(2000, \"JAN\", 1)) # compare Gregorian and", "(\"phrase\", \"phrase\")) def test_020_date_hash(self): \"\"\"Test date.Date hash\"\"\" dv1 = DateValue.parse(\"2016\")", "self.assertTrue(DateValue.parse(\"1 JAN 2000\") > DateValue.parse(\"TO 1 JAN 2000\")) self.assertTrue(DateValue.parse(\"1 JAN", "self.assertTrue(GregorianDate(2017, \"JAN\", 1) <= GregorianDate(2017, \"JAN\", 2)) self.assertTrue(GregorianDate(2017, \"JAN\", 2)", "GregorianDate(2000, \"DEC\", 31))) dv = DateValue.parse(\"BET 31 DEC 2000 AND", "self.assertTrue(GregorianDate(2017, \"JAN\", 2) > GregorianDate(2017, \"JAN\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 2)", "DateValue.parse(\"2016\") dv2 = DateValue.parse(\"2016\") self.assertEqual(hash(dv1), hash(dv2)) dv1 = DateValue.parse(\"31 DEC", "date = CalendarDate.parse(\"10 MAR 1699/00\") 
self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 1699) self.assertEqual(date.dual_year,", "return (\"interpreted\", date.date, date.phrase) def visitPhrase(self, date): if not isinstance(date,", "date.DateValue class.\"\"\" date = DateValue.parse(\"not a date\") self.assertIsInstance(date, DateValuePhrase) self.assertEqual(date.kind,", "self.assertEqual(str(date), fmt + \" \" + datestr) self.assertEqual(date.date, value) def", "\"JAN\") > GregorianDate(2017, \"JAN\", 31)) self.assertTrue(GregorianDate(2017, \"JAN\") < GregorianDate(2017, \"FEB\",", "compare Gregorian and Julian dates self.assertTrue(GregorianDate(1582, \"OCT\", 15) == JulianDate(1582,", "TypeError(str(type(date))) return (\"period\", date.date1, date.date2) def visitFrom(self, date): if not", "1) self.assertEqual(date.year, 1) self.assertFalse(date.bc) self.assertEqual(date.year_str, \"1\") self.assertEqual(date.month, \"FRUC\") self.assertEqual(date.month_num, 12)", "date.date) def visitInterpreted(self, date): if not isinstance(date, DateValueInterpreted): raise TypeError(str(type(date)))", "1698/99\") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 1698) self.assertEqual(date.dual_year, 1699) self.assertFalse(date.bc) self.assertEqual(date.month, \"MAR\")", ") class TestDateVisitor(CalendarDateVisitor, DateValueVisitor): def visitGregorian(self, date): if not isinstance(date,", "2000 AND 1 JAN 2001\")) self.assertTrue(DateValue.parse(\"1 JAN 2000\") > DateValue.parse(\"BEF", "GregorianDate(2017, \"JAN\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 1) != GregorianDate(2017, \"JAN\", 2))", "self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = CalendarDate.parse(\"@#DJULIAN@ 100 B.C.\") self.assertIsInstance(date, JulianDate) self.assertEqual(date.year,", "TypeError(str(type(date))) return (\"estimated\", date.date) def visitInterpreted(self, date): if not isinstance(date,", "def visitSimple(self, date): if not isinstance(date, DateValueSimple): raise TypeError(str(type(date))) return", "not isinstance(date, DateValueBefore): raise TypeError(str(type(date))) return (\"before\", date.date) def visitAfter(self,", "test_014_date_parse_approx(self): \"\"\"Test date.DateValue class.\"\"\" dates = {\"500 B.C.\": GregorianDate(500, bc=True),", "\"5000\") self.assertIsNone(date.month) self.assertIsNone(date.month_num) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.HEBREW) date = FrenchDate(1, \"FRUC\",", "7) self.assertEqual(date.day, 15) self.assertEqual(date.original, \"@#DFRENCH R@ 15 GERM 0001\") self.assertEqual(date.calendar,", "1 JAN 2017 (some phrase)\") def test_016_date_parse_simple(self): \"\"\"Test date.DateValue class.\"\"\"", "date = FrenchDate(1, \"VEND\", 1) self.assertEqual(str(date), \"@#DFRENCH R@ 1 VEND", "self.assertEqual(dv.key(), (GregorianDate(2016), GregorianDate(2016))) dv = DateValue.parse(\"31 DEC 2000\") self.assertIsInstance(dv.key(), tuple)", "1 JAN 2000 AND 1 JAN 2001\")) self.assertTrue(DateValue.parse(\"1 JAN 2000\")", "= DateValue.parse(\"2016\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2016), GregorianDate(2016))) dv = DateValue.parse(\"31", "= GregorianDate(2017, \"DEC\", 31) value = DateValueSimple(date1).accept(visitor) self.assertEqual(value, (\"simple\", date1))", "date = GregorianDate(2017, \"OCT\", 9) self.assertEqual(date.year, 2017) self.assertIsNone(date.dual_year) self.assertFalse(date.bc) self.assertEqual(date.year_str,", "date compares later than more 
specific self.assertTrue(DateValue.parse(\"2000\") > DateValue.parse(\"31 DEC", "isinstance(date, DateValueAbout): raise TypeError(str(type(date))) return (\"about\", date.date) def visitCalculated(self, date):", "self.assertEqual(date.date, GregorianDate(1967)) self.assertEqual(str(date), \"FROM 1967\") date = DateValue.parse(\"TO 1 JAN", "self.assertEqual(date.month, \"OCT\") self.assertEqual(date.month_num, 10) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = GregorianDate(1699,", "< DateValue.parse(\"BET 1 JAN 2000 AND 1 JAN 2001\")) self.assertTrue(DateValue.parse(\"1", "DateValueTypes.TO) self.assertEqual(date.date, GregorianDate(2017, \"JAN\", 1)) self.assertEqual(str(date), \"TO 1 JAN 2017\")", "not isinstance(date, DateValueAbout): raise TypeError(str(type(date))) return (\"about\", date.date) def visitCalculated(self,", "\"not a date\") self.assertEqual(str(date), \"(not a date)\") def test_012_date_parse_period(self): \"\"\"Test", "visitCalculated(self, date): if not isinstance(date, DateValueCalculated): raise TypeError(str(type(date))) return (\"calculated\",", "date = CalendarDate.parse(\"start of time\") def test_006_cal_date_visitor(self): \"\"\"Test date.CalendarDate.accept method.\"\"\"", "= DateValueInterpreted(date1, \"phrase\").accept(visitor) self.assertEqual(value, (\"interpreted\", date1, \"phrase\")) value = DateValuePhrase(\"phrase\").accept(visitor)", "# compare Gregorian and French dates self.assertTrue(GregorianDate(1792, \"SEP\", 22) ==", "a date\") self.assertEqual(str(date), \"(not a date)\") def test_012_date_parse_period(self): \"\"\"Test date.DateValue", "self.assertFalse(date.bc) self.assertEqual(date.year_str, \"1699/00\") self.assertEqual(date.month, \"FEB\") self.assertEqual(date.month_num, 2) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.GREGORIAN)", "TO 1 JAN 2002\") > DateValue.parse(\"BET 1 JAN 2000 AND", "\"VEND\", 2) > GregorianDate(1792, \"SEP\", 22)) self.assertTrue(GregorianDate(2020, \"SEP\", 21) ==", "2020\") # cannot handle UNKNOWN with self.assertRaises(ValueError): date = CalendarDate.parse(\"@#DUNKNOWN@", "self.assertEqual(date.year_str, \"5 B.C.\") self.assertEqual(date.month, \"JAN\") self.assertEqual(date.month_num, 1) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.JULIAN)", "GregorianDate) self.assertEqual(date.year, 1698) self.assertEqual(date.dual_year, 1699) self.assertFalse(date.bc) self.assertEqual(date.month, \"MAR\") self.assertEqual(date.month_num, 3)", "1, dual_year=1700) self.assertEqual(str(date), \"1 JAN 1699/00\") date = HebrewDate(5000) self.assertEqual(str(date),", "self.assertEqual(date.phrase, \"some phrase\") date = DateValue.parse(\"INT 1967 B.C. 
(some phrase)\")", "test_019_date_value_visitor(self): \"\"\"Test date.DateValue class.\"\"\" visitor = TestDateVisitor() date1 = GregorianDate(2017,", "1) self.assertFalse(date.bc) self.assertEqual(date.month, \"GERM\") self.assertEqual(date.month_num, 7) self.assertEqual(date.day, 15) self.assertEqual(date.original, \"@#DFRENCH", "self.assertEqual(hash(GregorianDate(2017, \"OCT\", 9, bc=True)), hash(GregorianDate(2017, \"OCT\", 9, bc=True))) self.assertEqual(hash(FrenchDate(1, \"VEND\",", "\"\"\"Test date.DateValue class.\"\"\" date = DateValue.parse(\"FROM 1967\") self.assertIsInstance(date, DateValueFrom) self.assertEqual(date.kind,", "date): if not isinstance(date, DateValueSimple): raise TypeError(str(type(date))) return (\"simple\", date.date)", "value = DateValueSimple(date1).accept(visitor) self.assertEqual(value, (\"simple\", date1)) value = DateValueFrom(date1).accept(visitor) self.assertEqual(value,", "GregorianDate(1920, \"MAR\")) self.assertEqual(date.date2, GregorianDate(2000, \"APR\", 1)) self.assertEqual(str(date), \"FROM MAR 1920", "= DateValue.parse(\"31 DEC 2000\") self.assertEqual(hash(dv1), hash(dv2)) dv1 = DateValue.parse(\"BET 31", "= DateValue.parse(\"from mar 1920 to 1 apr 2000\") self.assertIsInstance(date, DateValuePeriod)", "DateValueBefore) self.assertEqual(date.kind, DateValueTypes.BEFORE) self.assertEqual(date.date, GregorianDate(1967, bc=True)) self.assertEqual(str(date), \"BEFORE 1967 B.C.\")", "self.assertEqual(hash(dv1), hash(dv2)) dv1 = DateValue.parse(\"BET 31 DEC 2000 AND 1", "visitJulian(self, date): if not isinstance(date, JulianDate): raise TypeError(str(type(date))) return (\"julian\",", "DateValueAfter): raise TypeError(str(type(date))) return (\"after\", date.date) def visitAbout(self, date): if", "\"DEC\", 31))) dv = DateValue.parse(\"BET 31 DEC 2000 AND 1", "DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase,", "self.assertTrue(GregorianDate(2020, \"JAN\", 1) == HebrewDate(5780, \"SVN\", 4)) def test_004_cal_date_str(self): \"\"\"Test", "CalendarType.GREGORIAN) date = GregorianDate(2017, \"OCT\", bc=True) self.assertEqual(date.year, 2017) self.assertIsNone(date.dual_year) self.assertTrue(date.bc)", "test_010_date_no_date(self): \"\"\"Test date.DateValue class.\"\"\" date = DateValue.parse(\"not a date\") self.assertIsInstance(date,", "< GregorianDate(2018, \"JAN\", 1)) # dual date self.assertTrue(GregorianDate(1700, \"JAN\", 1)", "\"JAN\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 1) == GregorianDate(2017, \"JAN\", 1)) self.assertTrue(GregorianDate(2017,", "self.assertEqual(date.phrase, \"not a date\") self.assertEqual(str(date), \"(not a date)\") def test_012_date_parse_period(self):", "GregorianDate(1582, \"OCT\", 15)) self.assertTrue(GregorianDate(2000, \"JAN\", 14) == JulianDate(2000, \"JAN\", 1))", "self.assertTrue(GregorianDate(2017, \"JAN\", 1) < GregorianDate(2017, \"FEB\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 1)", "1) <= GregorianDate(2017, \"JAN\", 2)) self.assertTrue(GregorianDate(2017, \"JAN\", 2) > GregorianDate(2017,", "(1084542.5, 0)) date = JulianDate(1000) self.assertEqual(date.key(), (2086672.5, 1)) def test_003_cal_date_cmp(self):", "2001\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2000, \"DEC\", 31), GregorianDate(2001, \"JAN\", 1)))", "DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, 
DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple,", "DateValueRange): raise TypeError(str(type(date))) return (\"range\", date.date1, date.date2) def visitBefore(self, date):", "GregorianDate(2017, \"JAN\", 2)) self.assertTrue(GregorianDate(2017, \"JAN\", 2) > GregorianDate(2017, \"JAN\", 1))", "2000\") def test_014_date_parse_approx(self): \"\"\"Test date.DateValue class.\"\"\" dates = {\"500 B.C.\":", "= TestDateVisitor() date = GregorianDate(2017, \"OCT\", 9) value = date.accept(visitor)", "CalendarDate.parse(\"@#DGREGORIAN@ 10 MAR 1698/99\") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 1698) self.assertEqual(date.dual_year, 1699)", "self.assertEqual(str(date), \"INTERPRETED 1967 B.C. (some phrase)\") date = DateValue.parse(\"INT @#DGREGORIAN@", "self.assertEqual(date.kind, DateValueTypes.PERIOD) self.assertEqual(date.date1, GregorianDate(1920, \"MAR\")) self.assertEqual(date.date2, GregorianDate(2000, \"APR\", 1)) self.assertEqual(str(date),", "\"FEB\", dual_year=1700) self.assertEqual(date.year, 1699) self.assertEqual(date.dual_year, 1700) self.assertFalse(date.bc) self.assertEqual(date.year_str, \"1699/00\") self.assertEqual(date.month,", "(2458035.5, 0)) date = GregorianDate(1699, \"FEB\", 1, dual_year=1700) self.assertEqual(date.key(), (2342003.5,", "JAN 2001\"), DateValue.parse(\"BET 1 JAN 2000 AND 1 JAN 2001\"))", "= DateValueRange(date1, date2).accept(visitor) self.assertEqual(value, (\"range\", date1, date2)) value = DateValueAbout(date1).accept(visitor)", "DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor ) class TestDateVisitor(CalendarDateVisitor, DateValueVisitor): def visitGregorian(self,", "(\"estimated\", date1)) value = DateValueInterpreted(date1, \"phrase\").accept(visitor) self.assertEqual(value, (\"interpreted\", date1, \"phrase\"))", "isinstance(date, DateValueEstimated): raise TypeError(str(type(date))) return (\"estimated\", date.date) def visitInterpreted(self, date):", "date.DateValue class.\"\"\" date = DateValue.parse(\"BEF 1967B.C.\") self.assertIsInstance(date, DateValueBefore) self.assertEqual(date.kind, DateValueTypes.BEFORE)", "return (\"gregorian\", date) def visitJulian(self, date): if not isinstance(date, JulianDate):", "2000 AND 1 JAN 2001\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2000, \"DEC\",", "GregorianDate(2000, \"APR\")) self.assertEqual(str(date), \"BETWEEN MAR 1920 AND APR 2000\") def", "JulianDate(1582, \"OCT\", 5)) self.assertTrue(JulianDate(1582, \"OCT\", 6) > GregorianDate(1582, \"OCT\", 15))", "date = CalendarDate.parse(\"@#DFRENCH R@ 15 GERM 0001\") self.assertIsInstance(date, FrenchDate) self.assertEqual(date.year,", "CalendarType.GREGORIAN) date = CalendarDate.parse(\"@#DGREGORIAN@ 10 MAR 1698/99\") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year,", "date.CalendarDate class.\"\"\" date = GregorianDate(2017, \"OCT\", 9) self.assertEqual(date.year, 2017) self.assertIsNone(date.dual_year)", "self.assertTrue(GregorianDate(2017, \"JAN\", 1) < GregorianDate(2017, \"JAN\", 2)) self.assertTrue(GregorianDate(2017, \"JAN\", 1)", "DateValue.parse(\"(some phrase)\") self.assertIsInstance(date, DateValuePhrase) self.assertEqual(date.kind, DateValueTypes.PHRASE) self.assertEqual(date.phrase, \"some phrase\") date", "9, bc=True))) self.assertEqual(hash(FrenchDate(1, \"VEND\", 1)), hash(FrenchDate(1, \"VEND\", 1))) self.assertEqual(hash(FrenchDate(1)), hash(FrenchDate(1)))", "DateValueTypes.FROM) self.assertEqual(date.date, 
GregorianDate(1967)) self.assertEqual(str(date), \"FROM 1967\") date = DateValue.parse(\"TO 1", "10) self.assertEqual(date.day, 9) self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = GregorianDate(2017, \"OCT\", bc=True)", "self.assertIsInstance(date, DateValueAfter) self.assertEqual(date.kind, DateValueTypes.AFTER) self.assertEqual(date.date, GregorianDate(2017, \"JAN\", 1)) self.assertEqual(str(date), \"AFTER", "self.assertEqual(hash(GregorianDate(2017, \"OCT\", 9)), hash(GregorianDate(2017, \"OCT\", 9))) self.assertEqual(hash(GregorianDate(2017, \"OCT\", 9, bc=True)),", "1)) self.assertEqual(str(date), \"TO 1 JAN 2017\") date = DateValue.parse(\"FROM 1920", "(\"simple\", date1)) value = DateValueFrom(date1).accept(visitor) self.assertEqual(value, (\"from\", date1)) value =", "hash(dv2)) dv1 = DateValue.parse(\"BET 31 DEC 2000 AND 1 JAN", "date = GregorianDate(2017, \"OCT\", bc=True) self.assertEqual(date.year, 2017) self.assertIsNone(date.dual_year) self.assertTrue(date.bc) self.assertEqual(date.year_str,", "raise TypeError(str(type(date))) return (\"simple\", date.date) def visitPeriod(self, date): if not", "1))) # order of dates is messed up dv =", "# cannot handle ROMAN with self.assertRaises(ValueError): date = CalendarDate.parse(\"@#DROMAN@ 2020\")", "self.assertEqual(date.calendar, CalendarType.HEBREW) date = FrenchDate(1, \"FRUC\", 1) self.assertEqual(date.year, 1) self.assertFalse(date.bc)", "CalendarType.GREGORIAN) date = HebrewDate(5000) self.assertEqual(date.year, 5000) self.assertFalse(date.bc) self.assertEqual(date.year_str, \"5000\") self.assertIsNone(date.month)", "> DateValue.parse(\"2000\")) def test_018_date_parse_empty(self): \"\"\"Test date.DateValue class.\"\"\" for value in", "TypeError(str(type(date))) return (\"interpreted\", date.date, date.phrase) def visitPhrase(self, date): if not", "self.assertTrue(GregorianDate(1582, \"OCT\", 15) == JulianDate(1582, \"OCT\", 5)) self.assertTrue(GregorianDate(1582, \"OCT\", 16)", "phrase\") date = DateValue.parse(\"INT 1967 B.C. 
# -*- coding: utf-8 -*-

"""Tests for `ged4py.date` module."""

import unittest

from ged4py.calendar import (
    CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate,
    JulianDate, CalendarDateVisitor
)
from ged4py.date import (
    DateValue, DateValueAbout, DateValueAfter, DateValueBefore,
    DateValueCalculated, DateValueEstimated, DateValueFrom,
    DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange,
    DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor
)


class TestDateVisitor(CalendarDateVisitor, DateValueVisitor):

    def visitGregorian(self, date):
        if not isinstance(date, GregorianDate):
            raise TypeError(str(type(date)))
        return ("gregorian", date)

    def visitJulian(self, date):
        if not isinstance(date, JulianDate):
            raise TypeError(str(type(date)))
        return ("julian", date)

    def visitHebrew(self, date):
        if not isinstance(date, HebrewDate):
            raise TypeError(str(type(date)))
        return ("hebrew", date)

    def visitFrench(self, date):
        if not isinstance(date, FrenchDate):
            raise TypeError(str(type(date)))
        return ("french", date)

    def visitSimple(self, date):
        if not isinstance(date, DateValueSimple):
            raise TypeError(str(type(date)))
        return ("simple", date.date)

    def visitPeriod(self, date):
        if not isinstance(date, DateValuePeriod):
            raise TypeError(str(type(date)))
        return ("period", date.date1, date.date2)

    def visitFrom(self, date):
        if not isinstance(date, DateValueFrom):
            raise TypeError(str(type(date)))
        return ("from", date.date)

    def visitTo(self, date):
        if not isinstance(date, DateValueTo):
            raise TypeError(str(type(date)))
        return ("to", date.date)

    def visitRange(self, date):
        if not isinstance(date, DateValueRange):
            raise TypeError(str(type(date)))
        return ("range", date.date1, date.date2)

    def visitBefore(self, date):
        if not isinstance(date, DateValueBefore):
            raise TypeError(str(type(date)))
        return ("before", date.date)

    def visitAfter(self, date):
        if not isinstance(date, DateValueAfter):
            raise TypeError(str(type(date)))
        return ("after", date.date)

    def visitAbout(self, date):
        if not isinstance(date, DateValueAbout):
            raise TypeError(str(type(date)))
        return ("about", date.date)

    def visitCalculated(self, date):
        if not isinstance(date, DateValueCalculated):
            raise TypeError(str(type(date)))
        return ("calculated", date.date)

    def visitEstimated(self, date):
        if not isinstance(date, DateValueEstimated):
            raise TypeError(str(type(date)))
        return ("estimated", date.date)

    def visitInterpreted(self, date):
        if not isinstance(date, DateValueInterpreted):
            raise TypeError(str(type(date)))
        return ("interpreted", date.date, date.phrase)

    def visitPhrase(self, date):
        if not isinstance(date, DateValuePhrase):
            raise TypeError(str(type(date)))
        return ("phrase", date.phrase)


class TestDetailDate(unittest.TestCase):
    """Tests for `ged4py.date` module."""

    def test_001_cal_date(self):
        """Test date.CalendarDate class."""
        date = GregorianDate(2017, "OCT", 9)
        self.assertEqual(date.year, 2017)
        self.assertIsNone(date.dual_year)
        self.assertFalse(date.bc)
        self.assertEqual(date.year_str, "2017")
        self.assertEqual(date.month, "OCT")
        self.assertEqual(date.month_num, 10)
        self.assertEqual(date.day, 9)
        self.assertEqual(date.calendar, CalendarType.GREGORIAN)

        date = GregorianDate(2017, "OCT", bc=True)
        self.assertEqual(date.year, 2017)
        self.assertIsNone(date.dual_year)
        self.assertTrue(date.bc)
        self.assertEqual(date.year_str, "2017 B.C.")
        self.assertEqual(date.month, "OCT")
        self.assertEqual(date.month_num, 10)
        self.assertIsNone(date.day)
        self.assertEqual(date.calendar, CalendarType.GREGORIAN)

        date = GregorianDate(1699, "FEB", dual_year=1700)
        self.assertEqual(date.year, 1699)
        self.assertEqual(date.dual_year, 1700)
        self.assertFalse(date.bc)
        self.assertEqual(date.year_str, "1699/00")
        self.assertEqual(date.month, "FEB")
        self.assertEqual(date.month_num, 2)
        self.assertIsNone(date.day)
        self.assertEqual(date.calendar, CalendarType.GREGORIAN)

        date = HebrewDate(5000)
        self.assertEqual(date.year, 5000)
        self.assertFalse(date.bc)
        self.assertEqual(date.year_str, "5000")
        self.assertIsNone(date.month)
        self.assertIsNone(date.month_num)
        self.assertIsNone(date.day)
        self.assertEqual(date.calendar, CalendarType.HEBREW)

        date = FrenchDate(1, "FRUC", 1)
        self.assertEqual(date.year, 1)
        self.assertFalse(date.bc)
        self.assertEqual(date.year_str, "1")
        self.assertEqual(date.month, "FRUC")
        self.assertEqual(date.month_num, 12)
        self.assertEqual(date.day, 1)
        self.assertEqual(date.calendar, CalendarType.FRENCH_R)

        date = JulianDate(5, "JAN", bc=True)
        self.assertEqual(date.year, 5)
        self.assertTrue(date.bc)
        self.assertEqual(date.year_str, "5 B.C.")
        self.assertEqual(date.month, "JAN")
        self.assertEqual(date.month_num, 1)
        self.assertIsNone(date.day)
        self.assertEqual(date.calendar, CalendarType.JULIAN)

    def test_002_cal_date_key(self):
        """Test date.CalendarDate class."""
        date = GregorianDate(2017, "OCT", 9)
        self.assertEqual(date.key(), (2458035.5, 0))

        date = GregorianDate(1699, "FEB", 1, dual_year=1700)
        self.assertEqual(date.key(), (2342003.5, 0))

        date = FrenchDate(2017, "VENT", bc=True)
        self.assertEqual(date.key(), (1638959.5, 1))

        date = HebrewDate(2017, "TSH", 22)
        self.assertEqual(date.key(), (1084542.5, 0))

        date = JulianDate(1000)
        self.assertEqual(date.key(), (2086672.5, 1))

    def test_003_cal_date_cmp(self):
        """Test date.CalendarDate class."""
        self.assertTrue(GregorianDate(2016, "JAN", 1) < GregorianDate(2017, "JAN", 1))
        self.assertTrue(GregorianDate(2017, "JAN", 1) < GregorianDate(2017, "FEB", 1))
        self.assertTrue(GregorianDate(2017, "JAN", 1) < GregorianDate(2017, "JAN", 2))
        self.assertTrue(GregorianDate(2017, "JAN", 1) <= GregorianDate(2017, "JAN", 2))
        self.assertTrue(GregorianDate(2017, "JAN", 2) > GregorianDate(2017, "JAN", 1))
        self.assertTrue(GregorianDate(2017, "JAN", 2) >= GregorianDate(2017, "JAN", 1))
        self.assertTrue(GregorianDate(2017, "JAN", 1) == GregorianDate(2017, "JAN", 1))
        self.assertTrue(GregorianDate(2017, "JAN", 1) != GregorianDate(2017, "JAN", 2))

        # missing day compares as "past" the last day of month, but before next month
        self.assertTrue(GregorianDate(2017, "JAN") > GregorianDate(2017, "JAN", 31))
        self.assertTrue(GregorianDate(2017, "JAN") < GregorianDate(2017, "FEB", 1))

        # missing month compares as "past" the last day of year, but before next year
        self.assertTrue(GregorianDate(2017) > GregorianDate(2017, "DEC", 31))
        self.assertTrue(GregorianDate(2017) < GregorianDate(2018, "JAN", 1))

        # dual date
        self.assertTrue(GregorianDate(1700, "JAN", 1) == GregorianDate(1699, "JAN", 1, dual_year=1700))

        # compare Gregorian and Julian dates
        self.assertTrue(GregorianDate(1582, "OCT", 15) == JulianDate(1582, "OCT", 5))
        self.assertTrue(GregorianDate(1582, "OCT", 16) > JulianDate(1582, "OCT", 5))
        self.assertTrue(JulianDate(1582, "OCT", 6) > GregorianDate(1582, "OCT", 15))
        self.assertTrue(GregorianDate(2000, "JAN", 14) == JulianDate(2000, "JAN", 1))

        # compare Gregorian and French dates
        self.assertTrue(GregorianDate(1792, "SEP", 22) == FrenchDate(1, "VEND", 1))
        self.assertTrue(GregorianDate(1792, "SEP", 23) > FrenchDate(1, "VEND", 1))
        self.assertTrue(FrenchDate(1, "VEND", 2) > GregorianDate(1792, "SEP", 22))
        self.assertTrue(GregorianDate(2020, "SEP", 21) == FrenchDate(228, "COMP", 5))

        # compare Gregorian and Hebrew dates
        self.assertTrue(GregorianDate(2020, "JAN", 1) == HebrewDate(5780, "SVN", 4))

    def test_004_cal_date_str(self):
        """Test date.CalendarDate class."""
        date = GregorianDate(2017, "OCT", 9)
        self.assertEqual(str(date), "9 OCT 2017")

        date = GregorianDate(2017, "OCT", bc=True)
        self.assertEqual(str(date), "OCT 2017 B.C.")

        date = GregorianDate(1699, "JAN", 1, dual_year=1700)
        self.assertEqual(str(date), "1 JAN 1699/00")

        date = HebrewDate(5000)
        self.assertEqual(str(date), "@#DHEBREW@ 5000")

        date = FrenchDate(1, "VEND", 1)
        self.assertEqual(str(date), "@#DFRENCH R@ 1 VEND 1")

        date = JulianDate(1582, "OCT", 5)
        self.assertEqual(str(date), "@#DJULIAN@ 5 OCT 1582")

    def test_005_cal_date_parse(self):
        """Test date.CalendarDate.parse method."""
        date = CalendarDate.parse("31 MAY 2020")
        self.assertIsInstance(date, GregorianDate)
        self.assertEqual(date.year, 2020)
        self.assertIsNone(date.dual_year)
        self.assertFalse(date.bc)
        self.assertEqual(date.month, "MAY")
        self.assertEqual(date.month_num, 5)
        self.assertEqual(date.day, 31)
        self.assertEqual(date.original, "31 MAY 2020")
        self.assertEqual(date.calendar, CalendarType.GREGORIAN)

        date = CalendarDate.parse("@#DGREGORIAN@ 10 MAR 1698/99")
        self.assertIsInstance(date, GregorianDate)
        self.assertEqual(date.year, 1698)
        self.assertEqual(date.dual_year, 1699)
        self.assertFalse(date.bc)
        self.assertEqual(date.month, "MAR")
        self.assertEqual(date.month_num, 3)
        self.assertEqual(date.day, 10)
        self.assertEqual(date.original, "@#DGREGORIAN@ 10 MAR 1698/99")
        self.assertEqual(date.calendar, CalendarType.GREGORIAN)

        date = CalendarDate.parse("10 MAR 1699/00")
        self.assertIsInstance(date, GregorianDate)
        self.assertEqual(date.year, 1699)
        self.assertEqual(date.dual_year, 1700)
        self.assertEqual(date.original, "10 MAR 1699/00")
        self.assertEqual(date.calendar, CalendarType.GREGORIAN)

        date = CalendarDate.parse("@#DJULIAN@ 100 B.C.")
        self.assertIsInstance(date, JulianDate)
        self.assertEqual(date.year, 100)
        self.assertTrue(date.bc)
        self.assertIsNone(date.month)
        self.assertIsNone(date.month_num)
        self.assertIsNone(date.day)
        self.assertEqual(date.original, "@#DJULIAN@ 100 B.C.")
        self.assertEqual(date.calendar, CalendarType.JULIAN)

        date = CalendarDate.parse("@#DFRENCH R@ 15 GERM 0001")
        self.assertIsInstance(date, FrenchDate)
        self.assertEqual(date.year, 1)
        self.assertFalse(date.bc)
        self.assertEqual(date.month, "GERM")
        self.assertEqual(date.month_num, 7)
        self.assertEqual(date.day, 15)
        self.assertEqual(date.original, "@#DFRENCH R@ 15 GERM 0001")
        self.assertEqual(date.calendar, CalendarType.FRENCH_R)

        date = CalendarDate.parse("@#DHEBREW@ 7 NSN 5000")
        self.assertIsInstance(date, HebrewDate)
        self.assertEqual(date.year, 5000)
        self.assertFalse(date.bc)
        self.assertEqual(date.month, "NSN")
        self.assertEqual(date.month_num, 8)
        self.assertEqual(date.day, 7)
        self.assertEqual(date.original, "@#DHEBREW@ 7 NSN 5000")
        self.assertEqual(date.calendar, CalendarType.HEBREW)

        # cannot handle ROMAN
        with self.assertRaises(ValueError):
            date = CalendarDate.parse("@#DROMAN@ 2020")

        # cannot handle UNKNOWN
        with self.assertRaises(ValueError):
            date = CalendarDate.parse("@#DUNKNOWN@ 2020")

        # dual year only works for GREGORIAN
        with self.assertRaises(ValueError):
            date = CalendarDate.parse("@#DJULIAN@ 2020/21")

        # cannot parse nonsense
        with self.assertRaises(ValueError):
            date = CalendarDate.parse("start of time")

    def test_006_cal_date_visitor(self):
        """Test date.CalendarDate.accept method."""
        visitor = TestDateVisitor()

        date = GregorianDate(2017, "OCT", 9)
        value = date.accept(visitor)
        self.assertEqual(value, ("gregorian", date))

        date = HebrewDate(5000)
        value = date.accept(visitor)
        self.assertEqual(value, ("hebrew", date))

        date = FrenchDate(1, "VEND", 1)
        value = date.accept(visitor)
        self.assertEqual(value, ("french", date))

        date = JulianDate(1582, "OCT", 5)
        value = date.accept(visitor)
        self.assertEqual(value, ("julian", date))

    def test_007_cal_date_hash(self):
        """Test date.CalendarDate hash."""
        self.assertEqual(hash(GregorianDate(2017, "OCT", 9)),
                         hash(GregorianDate(2017, "OCT", 9)))
        self.assertEqual(hash(GregorianDate(2017, "OCT", 9, bc=True)),
                         hash(GregorianDate(2017, "OCT", 9, bc=True)))
        self.assertEqual(hash(FrenchDate(1, "VEND", 1)),
                         hash(FrenchDate(1, "VEND", 1)))
        self.assertEqual(hash(FrenchDate(1)), hash(FrenchDate(1)))

    def test_010_date_no_date(self):
        """Test date.DateValue class."""
        date = DateValue.parse("not a date")
        self.assertIsInstance(date, DateValuePhrase)
        self.assertEqual(date.kind, DateValueTypes.PHRASE)
        self.assertEqual(date.phrase, "not a date")
        self.assertEqual(str(date), "(not a date)")

    def test_012_date_parse_period(self):
        """Test date.DateValue class."""
        date = DateValue.parse("FROM 1967")
        self.assertIsInstance(date, DateValueFrom)
        self.assertEqual(date.kind, DateValueTypes.FROM)
        self.assertEqual(date.date, GregorianDate(1967))
        self.assertEqual(str(date), "FROM 1967")

        date = DateValue.parse("TO 1 JAN 2017")
        self.assertIsInstance(date, DateValueTo)
        self.assertEqual(date.kind, DateValueTypes.TO)
        self.assertEqual(date.date, GregorianDate(2017, "JAN", 1))
        self.assertEqual(str(date), "TO 1 JAN 2017")

        date = DateValue.parse("FROM 1920 TO 2000")
        self.assertIsInstance(date, DateValuePeriod)
        self.assertEqual(date.kind, DateValueTypes.PERIOD)
        self.assertEqual(date.date1, GregorianDate(1920))
        self.assertEqual(date.date2, GregorianDate(2000))
        self.assertEqual(str(date), "FROM 1920 TO 2000")

        date = DateValue.parse("from mar 1920 to 1 apr 2000")
        self.assertIsInstance(date, DateValuePeriod)
        self.assertEqual(date.kind, DateValueTypes.PERIOD)
        self.assertEqual(date.date1, GregorianDate(1920, "MAR"))
        self.assertEqual(date.date2, GregorianDate(2000, "APR", 1))
        self.assertEqual(str(date), "FROM MAR 1920 TO 1 APR 2000")

    def test_013_date_parse_range(self):
        """Test date.DateValue class."""
        date = DateValue.parse("BEF 1967B.C.")
        self.assertIsInstance(date, DateValueBefore)
        self.assertEqual(date.kind, DateValueTypes.BEFORE)
        self.assertEqual(date.date, GregorianDate(1967, bc=True))
        self.assertEqual(str(date), "BEFORE 1967 B.C.")

        date = DateValue.parse("AFT 1 JAN 2017")
        self.assertIsInstance(date, DateValueAfter)
        self.assertEqual(date.kind, DateValueTypes.AFTER)
        self.assertEqual(date.date, GregorianDate(2017, "JAN", 1))
        self.assertEqual(str(date), "AFTER 1 JAN 2017")

        date = DateValue.parse("BET @#DJULIAN@ 1600 AND 2000")
        self.assertIsInstance(date, DateValueRange)
        self.assertEqual(date.kind, DateValueTypes.RANGE)
        self.assertEqual(date.date1, JulianDate(1600))
        self.assertEqual(date.date2, GregorianDate(2000))
        self.assertEqual(str(date), "BETWEEN @#DJULIAN@ 1600 AND 2000")

        date = DateValue.parse("bet mar 1920 and apr 2000")
        self.assertIsInstance(date, DateValueRange)
        self.assertEqual(date.kind, DateValueTypes.RANGE)
        self.assertEqual(date.date1, GregorianDate(1920, "MAR"))
        self.assertEqual(date.date2, GregorianDate(2000, "APR"))
        self.assertEqual(str(date), "BETWEEN MAR 1920 AND APR 2000")

    def test_014_date_parse_approx(self):
        """Test date.DateValue class."""
        dates = {"500 B.C.": GregorianDate(500, bc=True),
                 "JAN 2017": GregorianDate(2017, "JAN"),
                 "31 JAN 2017": GregorianDate(2017, "JAN", 31)}

        approx = [
            ("ABT", "ABOUT", DateValueAbout, DateValueTypes.ABOUT),
            ("CAL", "CALCULATED", DateValueCalculated, DateValueTypes.CALCULATED),
            ("EST", "ESTIMATED", DateValueEstimated, DateValueTypes.ESTIMATED)
        ]

        for appr, fmt, klass, typeEnum in approx:
            for datestr, value in dates.items():
                date = DateValue.parse(appr + " " + datestr)
                self.assertIsInstance(date, klass)
                self.assertEqual(date.kind, typeEnum)
                self.assertEqual(str(date), fmt + " " + datestr)
                self.assertEqual(date.date, value)

    def test_015_date_parse_phrase(self):
        """Test date.DateValue class."""
        date = DateValue.parse("(some phrase)")
        self.assertIsInstance(date, DateValuePhrase)
        self.assertEqual(date.kind, DateValueTypes.PHRASE)
        self.assertEqual(date.phrase, "some phrase")

        date = DateValue.parse("INT 1967 B.C. (some phrase)")
        self.assertIsInstance(date, DateValueInterpreted)
        self.assertEqual(date.kind, DateValueTypes.INTERPRETED)
        self.assertEqual(date.date, GregorianDate(1967, bc=True))
        self.assertEqual(date.phrase, "some phrase")
        self.assertEqual(str(date), "INTERPRETED 1967 B.C. (some phrase)")

        date = DateValue.parse("INT @#DGREGORIAN@ 1 JAN 2017 (some phrase)")
        self.assertIsInstance(date, DateValueInterpreted)
        self.assertEqual(date.kind, DateValueTypes.INTERPRETED)
        self.assertEqual(date.date, GregorianDate(2017, "JAN", 1))
        self.assertEqual(date.phrase, "some phrase")
        self.assertEqual(str(date), "INTERPRETED 1 JAN 2017 (some phrase)")

    def test_016_date_parse_simple(self):
        """Test date.DateValue class."""
        date = DateValue.parse("1967 B.C.")
        self.assertIsInstance(date, DateValueSimple)
        self.assertEqual(date.kind, DateValueTypes.SIMPLE)
        self.assertEqual(date.date, GregorianDate(1967, bc=True))
        self.assertEqual(str(date), "1967 B.C.")

        date = DateValue.parse("@#DGREGORIAN@ 1 JAN 2017")
        self.assertIsInstance(date, DateValueSimple)
        self.assertEqual(date.kind, DateValueTypes.SIMPLE)
        self.assertEqual(date.date, GregorianDate(2017, "JAN", 1))
        self.assertEqual(str(date), "1 JAN 2017")

    def test_017_date_cmp(self):
        """Test date.Date class."""
        dv = DateValue.parse("2016")
        self.assertIsInstance(dv.key(), tuple)
        self.assertEqual(dv.key(), (GregorianDate(2016), GregorianDate(2016)))

        dv = DateValue.parse("31 DEC 2000")
        self.assertIsInstance(dv.key(), tuple)
        self.assertEqual(dv.key(), (GregorianDate(2000, "DEC", 31), GregorianDate(2000, "DEC", 31)))

        dv = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001")
        self.assertIsInstance(dv.key(), tuple)
        self.assertEqual(dv.key(), (GregorianDate(2000, "DEC", 31), GregorianDate(2001, "JAN", 1)))

        # order of dates is messed up
        dv = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2000")
        self.assertIsInstance(dv.key(), tuple)
        self.assertEqual(dv.key(), (GregorianDate(2000, "DEC", 31), GregorianDate(2000, "JAN", 1)))

        self.assertTrue(DateValue.parse("2016") < DateValue.parse("2017"))
        self.assertTrue(DateValue.parse("2 JAN 2016") > DateValue.parse("1 JAN 2016"))
        self.assertTrue(DateValue.parse("BET 1900 AND 2000") < DateValue.parse("FROM 1920 TO 1999"))

        # comparing simple date with range
        self.assertTrue(DateValue.parse("1 JAN 2000") > DateValue.parse("BET 1 JAN 1999 AND 1 JAN 2000"))
        self.assertNotEqual(DateValue.parse("1 JAN 2000"), DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
        self.assertTrue(DateValue.parse("1 JAN 2000") < DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
        self.assertTrue(DateValue.parse("1 JAN 2000") > DateValue.parse("BEF 1 JAN 2000"))
        self.assertTrue(DateValue.parse("1 JAN 2000") > DateValue.parse("TO 1 JAN 2000"))
        self.assertTrue(DateValue.parse("1 JAN 2000") < DateValue.parse("AFT 1 JAN 2000"))
        self.assertTrue(DateValue.parse("1 JAN 2000") < DateValue.parse("FROM 1 JAN 2000"))

        # comparing ranges
        self.assertEqual(DateValue.parse("FROM 1 JAN 2000 TO 1 JAN 2001"),
                         DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
        self.assertTrue(DateValue.parse("FROM 1 JAN 1999 TO 1 JAN 2001") <
                        DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
        self.assertTrue(DateValue.parse("FROM 1 JAN 2000 TO 1 JAN 2002") >
                        DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))

        # Less specific date compares later than more specific
        self.assertTrue(DateValue.parse("2000") > DateValue.parse("31 DEC 2000"))
        self.assertTrue(DateValue.parse("DEC 2000") > DateValue.parse("31 DEC 2000"))

        # phrase is always later than any regular date
        self.assertTrue(DateValue.parse("(Could be 1996 or 1998)") > DateValue.parse("2000"))

        # "empty" date is always later than any regular date
        self.assertTrue(DateValue.parse("") > DateValue.parse("2000"))

    def test_018_date_parse_empty(self):
        """Test date.DateValue class."""
        for value in (None, ""):
            date = DateValue.parse(value)
            self.assertIsInstance(date, DateValuePhrase)
            self.assertEqual(date.kind, DateValueTypes.PHRASE)
            self.assertIsNone(date.phrase)
            self.assertEqual(str(date), "")

    def test_019_date_value_visitor(self):
        """Test date.DateValue class."""
        visitor = TestDateVisitor()

        date1 = GregorianDate(2017, "JAN", 1)
        date2 = GregorianDate(2017, "DEC", 31)

        value = DateValueSimple(date1).accept(visitor)
        self.assertEqual(value, ("simple", date1))

        value = DateValueFrom(date1).accept(visitor)
        self.assertEqual(value, ("from", date1))

        value = DateValueTo(date1).accept(visitor)
        self.assertEqual(value, ("to", date1))

        value = DateValuePeriod(date1, date2).accept(visitor)
        self.assertEqual(value, ("period", date1, date2))

        value = DateValueBefore(date1).accept(visitor)
        self.assertEqual(value, ("before", date1))

        value = DateValueAfter(date1).accept(visitor)
        self.assertEqual(value, ("after", date1))

        value = DateValueRange(date1, date2).accept(visitor)
        self.assertEqual(value, ("range", date1, date2))

        value = DateValueAbout(date1).accept(visitor)
        self.assertEqual(value, ("about", date1))

        value = DateValueCalculated(date1).accept(visitor)
        self.assertEqual(value, ("calculated", date1))

        value = DateValueEstimated(date1).accept(visitor)
        self.assertEqual(value, ("estimated", date1))

        value = DateValueInterpreted(date1, "phrase").accept(visitor)
        self.assertEqual(value, ("interpreted", date1, "phrase"))

        value = DateValuePhrase("phrase").accept(visitor)
        self.assertEqual(value, ("phrase", "phrase"))

    def test_020_date_hash(self):
        """Test date.Date hash"""
        dv1 = DateValue.parse("2016")
        dv2 = DateValue.parse("2016")
        self.assertEqual(hash(dv1), hash(dv2))

        dv1 = DateValue.parse("31 DEC 2000")
        dv2 = DateValue.parse("31 DEC 2000")
        self.assertEqual(hash(dv1), hash(dv2))

        dv1 = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001")
        dv2 = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001")
        self.assertEqual(hash(dv1), hash(dv2))
DateValue.parse(\"31 DEC 2000\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(),", "DateValue.parse(\"31 DEC 2000\") dv2 = DateValue.parse(\"31 DEC 2000\") self.assertEqual(hash(dv1), hash(dv2))", "1) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.JULIAN) def test_002_cal_date_key(self): \"\"\"Test date.CalendarDate class.\"\"\" date", "self.assertTrue(GregorianDate(2017, \"JAN\", 1) != GregorianDate(2017, \"JAN\", 2)) # missing day", "def test_013_date_parse_range(self): \"\"\"Test date.DateValue class.\"\"\" date = DateValue.parse(\"BEF 1967B.C.\") self.assertIsInstance(date,", "0001\") self.assertIsInstance(date, FrenchDate) self.assertEqual(date.year, 1) self.assertFalse(date.bc) self.assertEqual(date.month, \"GERM\") self.assertEqual(date.month_num, 7)", "# missing month compares as \"past\" the last day of", "raise TypeError(str(type(date))) return (\"to\", date.date) def visitRange(self, date): if not", "\"JAN\", bc=True) self.assertEqual(date.year, 5) self.assertTrue(date.bc) self.assertEqual(date.year_str, \"5 B.C.\") self.assertEqual(date.month, \"JAN\")", "phrase)\") def test_016_date_parse_simple(self): \"\"\"Test date.DateValue class.\"\"\" date = DateValue.parse(\"1967 B.C.\")", "date = DateValue.parse(\"TO 1 JAN 2017\") self.assertIsInstance(date, DateValueTo) self.assertEqual(date.kind, DateValueTypes.TO)", "date with range self.assertTrue(DateValue.parse(\"1 JAN 2000\") > DateValue.parse(\"BET 1 JAN", "= GregorianDate(2017, \"OCT\", 9) self.assertEqual(str(date), \"9 OCT 2017\") date =", "date = HebrewDate(5000) self.assertEqual(str(date), \"@#DHEBREW@ 5000\") date = FrenchDate(1, \"VEND\",", "5000\") self.assertEqual(date.calendar, CalendarType.HEBREW) # cannot handle ROMAN with self.assertRaises(ValueError): date", "self.assertEqual(date.kind, DateValueTypes.FROM) self.assertEqual(date.date, GregorianDate(1967)) self.assertEqual(str(date), \"FROM 1967\") date = DateValue.parse(\"TO", "\"OCT\", bc=True) self.assertEqual(date.year, 2017) self.assertIsNone(date.dual_year) self.assertTrue(date.bc) self.assertEqual(date.year_str, \"2017 B.C.\") self.assertEqual(date.month,", "1 JAN 2001\")) self.assertTrue(DateValue.parse(\"1 JAN 2000\") > DateValue.parse(\"BEF 1 JAN", "\"@#DHEBREW@ 7 NSN 5000\") self.assertEqual(date.calendar, CalendarType.HEBREW) # cannot handle ROMAN", "TypeError(str(type(date))) return (\"gregorian\", date) def visitJulian(self, date): if not isinstance(date,", "\"OCT\") self.assertEqual(date.month_num, 10) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = GregorianDate(1699, \"FEB\",", "2001\")) # Less specific date compares later than more specific", "hash(GregorianDate(2017, \"OCT\", 9))) self.assertEqual(hash(GregorianDate(2017, \"OCT\", 9, bc=True)), hash(GregorianDate(2017, \"OCT\", 9,", "DateValueTypes.PERIOD) self.assertEqual(date.date1, GregorianDate(1920, \"MAR\")) self.assertEqual(date.date2, GregorianDate(2000, \"APR\", 1)) self.assertEqual(str(date), \"FROM", "datestr) self.assertIsInstance(date, klass) self.assertEqual(date.kind, typeEnum) self.assertEqual(str(date), fmt + \" \"", "return (\"simple\", date.date) def visitPeriod(self, date): if not isinstance(date, DateValuePeriod):", "\"MAR\")) self.assertEqual(date.date2, GregorianDate(2000, \"APR\", 1)) self.assertEqual(str(date), \"FROM MAR 1920 TO", "HebrewDate(5000) self.assertEqual(str(date), \"@#DHEBREW@ 5000\") date = FrenchDate(1, \"VEND\", 1) self.assertEqual(str(date),", "def visitCalculated(self, 
date): if not isinstance(date, DateValueCalculated): raise TypeError(str(type(date))) return", "JAN 2000\")) self.assertTrue(DateValue.parse(\"1 JAN 2000\") < DateValue.parse(\"AFT 1 JAN 2000\"))", "self.assertEqual(date.month_num, 1) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.JULIAN) def test_002_cal_date_key(self): \"\"\"Test date.CalendarDate class.\"\"\"", "(1638959.5, 1)) date = HebrewDate(2017, \"TSH\", 22) self.assertEqual(date.key(), (1084542.5, 0))", "self.assertEqual(date.month, \"OCT\") self.assertEqual(date.month_num, 10) self.assertEqual(date.day, 9) self.assertEqual(date.calendar, CalendarType.GREGORIAN) date =", "> GregorianDate(2017, \"JAN\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 2) >= GregorianDate(2017, \"JAN\",", "\"JAN\", 1) < GregorianDate(2017, \"FEB\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 1) <", "1920 to 1 apr 2000\") self.assertIsInstance(date, DateValuePeriod) self.assertEqual(date.kind, DateValueTypes.PERIOD) self.assertEqual(date.date1,", "self.assertEqual(date.month_num, 10) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = GregorianDate(1699, \"FEB\", dual_year=1700)", "\"MAR\") self.assertEqual(date.month_num, 3) self.assertEqual(date.day, 10) self.assertEqual(date.original, \"@#DGREGORIAN@ 10 MAR 1698/99\")", "== JulianDate(1582, \"OCT\", 5)) self.assertTrue(GregorianDate(1582, \"OCT\", 16) > JulianDate(1582, \"OCT\",", "mar 1920 to 1 apr 2000\") self.assertIsInstance(date, DateValuePeriod) self.assertEqual(date.kind, DateValueTypes.PERIOD)", "= [ (\"ABT\", \"ABOUT\", DateValueAbout, DateValueTypes.ABOUT), (\"CAL\", \"CALCULATED\", DateValueCalculated, DateValueTypes.CALCULATED),", "class.\"\"\" date = DateValue.parse(\"1967 B.C.\") self.assertIsInstance(date, DateValueSimple) self.assertEqual(date.kind, DateValueTypes.SIMPLE) self.assertEqual(date.date,", "self.assertTrue(DateValue.parse(\"1 JAN 2000\") < DateValue.parse(\"FROM 1 JAN 2000\")) # comparing", "\"some phrase\") date = DateValue.parse(\"INT 1967 B.C. 
(some phrase)\") self.assertIsInstance(date,", "isinstance(date, DateValueFrom): raise TypeError(str(type(date))) return (\"from\", date.date) def visitTo(self, date):", "tuple) self.assertEqual(dv.key(), (GregorianDate(2016), GregorianDate(2016))) dv = DateValue.parse(\"31 DEC 2000\") self.assertIsInstance(dv.key(),", "1)) # missing month compares as \"past\" the last day", "JAN 2016\") > DateValue.parse(\"1 JAN 2016\")) self.assertTrue(DateValue.parse(\"BET 1900 AND 2000\")", "JAN 2017\": GregorianDate(2017, \"JAN\", 31)} approx = [ (\"ABT\", \"ABOUT\",", "DateValuePeriod) self.assertEqual(date.kind, DateValueTypes.PERIOD) self.assertEqual(date.date1, GregorianDate(1920)) self.assertEqual(date.date2, GregorianDate(2000)) self.assertEqual(str(date), \"FROM 1920", "self.assertIsInstance(date, DateValueSimple) self.assertEqual(date.kind, DateValueTypes.SIMPLE) self.assertEqual(date.date, GregorianDate(2017, \"JAN\", 1)) self.assertEqual(str(date), \"1", "self.assertFalse(date.bc) self.assertEqual(date.month, \"NSN\") self.assertEqual(date.month_num, 8) self.assertEqual(date.day, 7) self.assertEqual(date.original, \"@#DHEBREW@ 7", "15) == JulianDate(1582, \"OCT\", 5)) self.assertTrue(GregorianDate(1582, \"OCT\", 16) > JulianDate(1582,", "22)) self.assertTrue(GregorianDate(2020, \"SEP\", 21) == FrenchDate(228, \"COMP\", 5)) # compare", "self.assertEqual(hash(dv1), hash(dv2)) dv1 = DateValue.parse(\"31 DEC 2000\") dv2 = DateValue.parse(\"31", "self.assertEqual(str(date), \"1967 B.C.\") date = DateValue.parse(\"@#DGREGORIAN@ 1 JAN 2017\") self.assertIsInstance(date,", "self.assertIsInstance(date, DateValueRange) self.assertEqual(date.kind, DateValueTypes.RANGE) self.assertEqual(date.date1, JulianDate(1600)) self.assertEqual(date.date2, GregorianDate(2000)) self.assertEqual(str(date), \"BETWEEN", "the last day of month, but before next month self.assertTrue(GregorianDate(2017,", "2000\") date = DateValue.parse(\"bet mar 1920 and apr 2000\") self.assertIsInstance(date,", "> DateValue.parse(\"BET 1 JAN 1999 AND 1 JAN 2000\")) self.assertNotEqual(DateValue.parse(\"1", "= DateValueSimple(date1).accept(visitor) self.assertEqual(value, (\"simple\", date1)) value = DateValueFrom(date1).accept(visitor) self.assertEqual(value, (\"from\",", "value = date.accept(visitor) self.assertEqual(value, (\"hebrew\", date)) date = FrenchDate(1, \"VEND\",", "1)) self.assertTrue(GregorianDate(2017, \"JAN\", 2) >= GregorianDate(2017, \"JAN\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\",", "\"SEP\", 22)) self.assertTrue(GregorianDate(2020, \"SEP\", 21) == FrenchDate(228, \"COMP\", 5)) #", "1 VEND 1\") date = JulianDate(1582, \"OCT\", 5) self.assertEqual(str(date), \"@#DJULIAN@", "1) < GregorianDate(2017, \"FEB\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 1) < GregorianDate(2017,", "GregorianDate(2017, \"OCT\", 9) self.assertEqual(date.year, 2017) self.assertIsNone(date.dual_year) self.assertFalse(date.bc) self.assertEqual(date.year_str, \"2017\") self.assertEqual(date.month,", "def test_015_date_parse_phrase(self): \"\"\"Test date.DateValue class.\"\"\" date = DateValue.parse(\"(some phrase)\") self.assertIsInstance(date,", "2000\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2000, \"DEC\", 31), GregorianDate(2000, \"DEC\", 31)))", "self.assertTrue(FrenchDate(1, \"VEND\", 2) > GregorianDate(1792, \"SEP\", 22)) self.assertTrue(GregorianDate(2020, \"SEP\", 21)", "isinstance(date, DateValuePeriod): raise TypeError(str(type(date))) return (\"period\", date.date1, date.date2) 
def visitFrom(self,", "1699/00\") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 1699) self.assertEqual(date.dual_year, 1700) self.assertEqual(date.original, \"10 MAR", "FrenchDate(1, \"VEND\", 1)) self.assertTrue(FrenchDate(1, \"VEND\", 2) > GregorianDate(1792, \"SEP\", 22))", "2000\") self.assertIsInstance(date, DateValueRange) self.assertEqual(date.kind, DateValueTypes.RANGE) self.assertEqual(date.date1, GregorianDate(1920, \"MAR\")) self.assertEqual(date.date2, GregorianDate(2000,", "= FrenchDate(1, \"VEND\", 1) self.assertEqual(str(date), \"@#DFRENCH R@ 1 VEND 1\")", "bc=True)), hash(GregorianDate(2017, \"OCT\", 9, bc=True))) self.assertEqual(hash(FrenchDate(1, \"VEND\", 1)), hash(FrenchDate(1, \"VEND\",", "always later than any regular date self.assertTrue(DateValue.parse(\"(Could be 1996 or", "value = DateValueAbout(date1).accept(visitor) self.assertEqual(value, (\"about\", date1)) value = DateValueCalculated(date1).accept(visitor) self.assertEqual(value,", "self.assertFalse(date.bc) self.assertEqual(date.month, \"MAR\") self.assertEqual(date.month_num, 3) self.assertEqual(date.day, 10) self.assertEqual(date.original, \"@#DGREGORIAN@ 10", "= DateValue.parse(\"31 DEC 2000\") dv2 = DateValue.parse(\"31 DEC 2000\") self.assertEqual(hash(dv1),", "self.assertIsInstance(date, DateValuePhrase) self.assertEqual(date.kind, DateValueTypes.PHRASE) self.assertEqual(date.phrase, \"not a date\") self.assertEqual(str(date), \"(not", "1) value = date.accept(visitor) self.assertEqual(value, (\"french\", date)) date = JulianDate(1582,", "self.assertIsInstance(date, DateValueTo) self.assertEqual(date.kind, DateValueTypes.TO) self.assertEqual(date.date, GregorianDate(2017, \"JAN\", 1)) self.assertEqual(str(date), \"TO", "2000\")) self.assertTrue(DateValue.parse(\"1 JAN 2000\") < DateValue.parse(\"FROM 1 JAN 2000\")) #", "self.assertEqual(date.calendar, CalendarType.JULIAN) def test_002_cal_date_key(self): \"\"\"Test date.CalendarDate class.\"\"\" date = GregorianDate(2017,", "DateValue.parse(\"2016\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2016), GregorianDate(2016))) dv = DateValue.parse(\"31 DEC", "GregorianDate(2017, \"JAN\", 1)) self.assertEqual(str(date), \"TO 1 JAN 2017\") date =", "DateValueCalculated(date1).accept(visitor) self.assertEqual(value, (\"calculated\", date1)) value = DateValueEstimated(date1).accept(visitor) self.assertEqual(value, (\"estimated\", date1))", "2000 AND 1 JAN 2001\")) self.assertTrue(DateValue.parse(\"FROM 1 JAN 2000 TO", "= HebrewDate(5000) value = date.accept(visitor) self.assertEqual(value, (\"hebrew\", date)) date =", "= DateValue.parse(\"bet mar 1920 and apr 2000\") self.assertIsInstance(date, DateValueRange) self.assertEqual(date.kind,", "hash(FrenchDate(1))) def test_010_date_no_date(self): \"\"\"Test date.DateValue class.\"\"\" date = DateValue.parse(\"not a", "= DateValue.parse(\"FROM 1967\") self.assertIsInstance(date, DateValueFrom) self.assertEqual(date.kind, DateValueTypes.FROM) self.assertEqual(date.date, GregorianDate(1967)) self.assertEqual(str(date),", "comparing simple date with range self.assertTrue(DateValue.parse(\"1 JAN 2000\") > DateValue.parse(\"BET", "if not isinstance(date, DateValuePhrase): raise TypeError(str(type(date))) return (\"phrase\", date.phrase) class", "self.assertEqual(value, (\"simple\", date1)) value = DateValueFrom(date1).accept(visitor) self.assertEqual(value, (\"from\", date1)) value", "(\"range\", date.date1, date.date2) def visitBefore(self, date): if not 
isinstance(date, DateValueBefore):", "DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange,", "ged4py.date import ( DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom,", "= CalendarDate.parse(\"@#DJULIAN@ 2020/21\") # cannot parse nonsense with self.assertRaises(ValueError): date", "= DateValue.parse(\"31 DEC 2000\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2000, \"DEC\", 31),", "1999 AND 1 JAN 2000\")) self.assertNotEqual(DateValue.parse(\"1 JAN 2000\"), DateValue.parse(\"BET 1", "value in dates.items(): date = DateValue.parse(appr + \" \" +", "0001\") self.assertEqual(date.calendar, CalendarType.FRENCH_R) date = CalendarDate.parse(\"@#DHEBREW@ 7 NSN 5000\") self.assertIsInstance(date,", "TypeError(str(type(date))) return (\"french\", date) def visitSimple(self, date): if not isinstance(date,", "self.assertEqual(date.original, \"@#DJULIAN@ 100 B.C.\") self.assertEqual(date.calendar, CalendarType.JULIAN) date = CalendarDate.parse(\"@#DFRENCH R@", "date = CalendarDate.parse(\"@#DHEBREW@ 7 NSN 5000\") self.assertIsInstance(date, HebrewDate) self.assertEqual(date.year, 5000)", "self.assertEqual(date.key(), (2086672.5, 1)) def test_003_cal_date_cmp(self): \"\"\"Test date.CalendarDate class.\"\"\" self.assertTrue(GregorianDate(2016, \"JAN\",", "date = DateValue.parse(\"bet mar 1920 and apr 2000\") self.assertIsInstance(date, DateValueRange)", "# Less specific date compares later than more specific self.assertTrue(DateValue.parse(\"2000\")", "DateValue.parse(\"31 DEC 2000\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2000, \"DEC\", 31), GregorianDate(2000,", "compares as \"past\" the last day of year, but before", "DateValueSimple): raise TypeError(str(type(date))) return (\"simple\", date.date) def visitPeriod(self, date): if", "\"\"\"Test date.CalendarDate class.\"\"\" date = GregorianDate(2017, \"OCT\", 9) self.assertEqual(date.year, 2017)", "DEC 2000\")) # phrase is always later than any regular", "self.assertEqual(date.month, \"MAR\") self.assertEqual(date.month_num, 3) self.assertEqual(date.day, 10) self.assertEqual(date.original, \"@#DGREGORIAN@ 10 MAR", "dual_year=1700) self.assertEqual(date.key(), (2342003.5, 0)) date = FrenchDate(2017, \"VENT\", bc=True) self.assertEqual(date.key(),", "1 JAN 1999 AND 1 JAN 2000\")) self.assertNotEqual(DateValue.parse(\"1 JAN 2000\"),", "self.assertEqual(dv.key(), (GregorianDate(2000, \"DEC\", 31), GregorianDate(2000, \"JAN\", 1))) self.assertTrue(DateValue.parse(\"2016\") < DateValue.parse(\"2017\"))", "self.assertRaises(ValueError): date = CalendarDate.parse(\"start of time\") def test_006_cal_date_visitor(self): \"\"\"Test date.CalendarDate.accept", "GregorianDate(2000)) self.assertEqual(str(date), \"BETWEEN @#DJULIAN@ 1600 AND 2000\") date = DateValue.parse(\"bet", "date.phrase) def visitPhrase(self, date): if not isinstance(date, DateValuePhrase): raise TypeError(str(type(date)))", "\" \" + datestr) self.assertIsInstance(date, klass) self.assertEqual(date.kind, typeEnum) self.assertEqual(str(date), fmt", "1920 AND APR 2000\") def test_014_date_parse_approx(self): \"\"\"Test date.DateValue class.\"\"\" dates", "(\"about\", date.date) def visitCalculated(self, date): if not isinstance(date, DateValueCalculated): raise", "MAR 1920 AND APR 2000\") def test_014_date_parse_approx(self): 
\"\"\"Test date.DateValue class.\"\"\"", "if not isinstance(date, GregorianDate): raise TypeError(str(type(date))) return (\"gregorian\", date) def", "date2 = GregorianDate(2017, \"DEC\", 31) value = DateValueSimple(date1).accept(visitor) self.assertEqual(value, (\"simple\",", ") from ged4py.date import ( DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated,", "with range self.assertTrue(DateValue.parse(\"1 JAN 2000\") > DateValue.parse(\"BET 1 JAN 1999", "self.assertEqual(date.dual_year, 1700) self.assertFalse(date.bc) self.assertEqual(date.year_str, \"1699/00\") self.assertEqual(date.month, \"FEB\") self.assertEqual(date.month_num, 2) self.assertIsNone(date.day)", "JAN 2000 AND 1 JAN 2001\")) self.assertTrue(DateValue.parse(\"1 JAN 2000\") <", "\"JAN\", 1))) # order of dates is messed up dv", "1) self.assertEqual(date.calendar, CalendarType.FRENCH_R) date = JulianDate(5, \"JAN\", bc=True) self.assertEqual(date.year, 5)", "date.CalendarDate hash.\"\"\" self.assertEqual(hash(GregorianDate(2017, \"OCT\", 9)), hash(GregorianDate(2017, \"OCT\", 9))) self.assertEqual(hash(GregorianDate(2017, \"OCT\",", "self.assertTrue(DateValue.parse(\"DEC 2000\") > DateValue.parse(\"31 DEC 2000\")) # phrase is always", "+ datestr) self.assertIsInstance(date, klass) self.assertEqual(date.kind, typeEnum) self.assertEqual(str(date), fmt + \"", "self.assertEqual(date.kind, DateValueTypes.SIMPLE) self.assertEqual(date.date, GregorianDate(1967, bc=True)) self.assertEqual(str(date), \"1967 B.C.\") date =", "\"FRUC\", 1) self.assertEqual(date.year, 1) self.assertFalse(date.bc) self.assertEqual(date.year_str, \"1\") self.assertEqual(date.month, \"FRUC\") self.assertEqual(date.month_num,", "dv1 = DateValue.parse(\"31 DEC 2000\") dv2 = DateValue.parse(\"31 DEC 2000\")", "if not isinstance(date, DateValueFrom): raise TypeError(str(type(date))) return (\"from\", date.date) def", "bc=True)) self.assertEqual(str(date), \"1967 B.C.\") date = DateValue.parse(\"@#DGREGORIAN@ 1 JAN 2017\")", "date) def visitJulian(self, date): if not isinstance(date, JulianDate): raise TypeError(str(type(date)))", "date.date) def visitCalculated(self, date): if not isinstance(date, DateValueCalculated): raise TypeError(str(type(date)))", "for datestr, value in dates.items(): date = DateValue.parse(appr + \"", "test_015_date_parse_phrase(self): \"\"\"Test date.DateValue class.\"\"\" date = DateValue.parse(\"(some phrase)\") self.assertIsInstance(date, DateValuePhrase)", "DateValuePeriod): raise TypeError(str(type(date))) return (\"period\", date.date1, date.date2) def visitFrom(self, date):", "def visitInterpreted(self, date): if not isinstance(date, DateValueInterpreted): raise TypeError(str(type(date))) return", "\"JAN\", 2) >= GregorianDate(2017, \"JAN\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\", 1) ==", "self.assertTrue(DateValue.parse(\"FROM 1 JAN 1999 TO 1 JAN 2001\") < DateValue.parse(\"BET", "2) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = HebrewDate(5000) self.assertEqual(date.year, 5000) self.assertFalse(date.bc)", "\"OCT 2017 B.C.\") date = GregorianDate(1699, \"JAN\", 1, dual_year=1700) self.assertEqual(str(date),", "1) self.assertEqual(str(date), \"@#DFRENCH R@ 1 VEND 1\") date = JulianDate(1582,", "JulianDate(1582, \"OCT\", 5) self.assertEqual(str(date), \"@#DJULIAN@ 5 OCT 1582\") def test_005_cal_date_parse(self):", "day compares as \"past\" the last day of month, but", "raise TypeError(str(type(date))) return (\"phrase\", date.phrase) class 
TestDetailDate(unittest.TestCase): \"\"\"Tests for `ged4py.date`", "31)) self.assertTrue(GregorianDate(2017) < GregorianDate(2018, \"JAN\", 1)) # dual date self.assertTrue(GregorianDate(1700,", "self.assertEqual(date.year, 5000) self.assertFalse(date.bc) self.assertEqual(date.year_str, \"5000\") self.assertIsNone(date.month) self.assertIsNone(date.month_num) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.HEBREW)", "1 JAN 2017\") self.assertIsInstance(date, DateValueAfter) self.assertEqual(date.kind, DateValueTypes.AFTER) self.assertEqual(date.date, GregorianDate(2017, \"JAN\",", "def test_006_cal_date_visitor(self): \"\"\"Test date.CalendarDate.accept method.\"\"\" visitor = TestDateVisitor() date =", "1) == HebrewDate(5780, \"SVN\", 4)) def test_004_cal_date_str(self): \"\"\"Test date.CalendarDate class.\"\"\"", "31)} approx = [ (\"ABT\", \"ABOUT\", DateValueAbout, DateValueTypes.ABOUT), (\"CAL\", \"CALCULATED\",", "> DateValue.parse(\"BET 1 JAN 2000 AND 1 JAN 2001\")) #", "JulianDate(1582, \"OCT\", 5)) self.assertTrue(GregorianDate(1582, \"OCT\", 16) > JulianDate(1582, \"OCT\", 5))", "AND 1 JAN 2001\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2000, \"DEC\", 31),", "self.assertTrue(GregorianDate(2017) < GregorianDate(2018, \"JAN\", 1)) # dual date self.assertTrue(GregorianDate(1700, \"JAN\",", "def test_002_cal_date_key(self): \"\"\"Test date.CalendarDate class.\"\"\" date = GregorianDate(2017, \"OCT\", 9)", "date.date) def visitTo(self, date): if not isinstance(date, DateValueTo): raise TypeError(str(type(date)))", "GregorianDate) self.assertEqual(date.year, 1699) self.assertEqual(date.dual_year, 1700) self.assertEqual(date.original, \"10 MAR 1699/00\") self.assertEqual(date.calendar,", "2017 (some phrase)\") def test_016_date_parse_simple(self): \"\"\"Test date.DateValue class.\"\"\" date =", "JAN 2017\") date = DateValue.parse(\"FROM 1920 TO 2000\") self.assertIsInstance(date, DateValuePeriod)", "self.assertEqual(date.month_num, 3) self.assertEqual(date.day, 10) self.assertEqual(date.original, \"@#DGREGORIAN@ 10 MAR 1698/99\") self.assertEqual(date.calendar,", "1700) self.assertFalse(date.bc) self.assertEqual(date.year_str, \"1699/00\") self.assertEqual(date.month, \"FEB\") self.assertEqual(date.month_num, 2) self.assertIsNone(date.day) self.assertEqual(date.calendar,", "TestDateVisitor() date1 = GregorianDate(2017, \"JAN\", 1) date2 = GregorianDate(2017, \"DEC\",", "1999\")) # comparing simple date with range self.assertTrue(DateValue.parse(\"1 JAN 2000\")", "return (\"after\", date.date) def visitAbout(self, date): if not isinstance(date, DateValueAbout):", "self.assertEqual(date.original, \"31 MAY 2020\") self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = CalendarDate.parse(\"@#DGREGORIAN@ 10", "DateValue.parse(\"FROM 1967\") self.assertIsInstance(date, DateValueFrom) self.assertEqual(date.kind, DateValueTypes.FROM) self.assertEqual(date.date, GregorianDate(1967)) self.assertEqual(str(date), \"FROM", "= DateValueAfter(date1).accept(visitor) self.assertEqual(value, (\"after\", date1)) value = DateValueRange(date1, date2).accept(visitor) self.assertEqual(value,", "> GregorianDate(2017, \"JAN\", 31)) self.assertTrue(GregorianDate(2017, \"JAN\") < GregorianDate(2017, \"FEB\", 1))", "CalendarDateVisitor ) from ged4py.date import ( DateValue, DateValueAbout, DateValueAfter, DateValueBefore,", "self.assertEqual(date.kind, DateValueTypes.PHRASE) self.assertIsNone(date.phrase) self.assertEqual(str(date), \"\") def 
test_019_date_value_visitor(self): \"\"\"Test date.DateValue class.\"\"\"", "= HebrewDate(5000) self.assertEqual(str(date), \"@#DHEBREW@ 5000\") date = FrenchDate(1, \"VEND\", 1)", "TypeError(str(type(date))) return (\"simple\", date.date) def visitPeriod(self, date): if not isinstance(date,", "Gregorian and French dates self.assertTrue(GregorianDate(1792, \"SEP\", 22) == FrenchDate(1, \"VEND\",", "2000 AND 1 JAN 2001\")) self.assertTrue(DateValue.parse(\"FROM 1 JAN 1999 TO", "value = DateValuePeriod(date1, date2).accept(visitor) self.assertEqual(value, (\"period\", date1, date2)) value =", "DateValue.parse(\"FROM 1920 TO 1999\")) # comparing simple date with range", "phrase is always later than any regular date self.assertTrue(DateValue.parse(\"(Could be", "DateValueBefore): raise TypeError(str(type(date))) return (\"before\", date.date) def visitAfter(self, date): if", "GregorianDate(1967, bc=True)) self.assertEqual(str(date), \"BEFORE 1967 B.C.\") date = DateValue.parse(\"AFT 1", "1)) self.assertTrue(GregorianDate(2017, \"JAN\", 1) < GregorianDate(2017, \"FEB\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\",", "2)) # missing day compares as \"past\" the last day", "self.assertFalse(date.bc) self.assertEqual(date.year_str, \"2017\") self.assertEqual(date.month, \"OCT\") self.assertEqual(date.month_num, 10) self.assertEqual(date.day, 9) self.assertEqual(date.calendar,", "but before next year self.assertTrue(GregorianDate(2017) > GregorianDate(2017, \"DEC\", 31)) self.assertTrue(GregorianDate(2017)", "CalendarType.GREGORIAN) date = CalendarDate.parse(\"10 MAR 1699/00\") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 1699)", "later than any regular date self.assertTrue(DateValue.parse(\"(Could be 1996 or 1998)\")", "1)) self.assertTrue(GregorianDate(2017, \"JAN\", 1) < GregorianDate(2017, \"JAN\", 2)) self.assertTrue(GregorianDate(2017, \"JAN\",", "AND 2000\") self.assertIsInstance(date, DateValueRange) self.assertEqual(date.kind, DateValueTypes.RANGE) self.assertEqual(date.date1, JulianDate(1600)) self.assertEqual(date.date2, GregorianDate(2000))", "raise TypeError(str(type(date))) return (\"about\", date.date) def visitCalculated(self, date): if not", "GregorianDate(1967, bc=True)) self.assertEqual(str(date), \"1967 B.C.\") date = DateValue.parse(\"@#DGREGORIAN@ 1 JAN", "date = DateValue.parse(appr + \" \" + datestr) self.assertIsInstance(date, klass)", "self.assertTrue(DateValue.parse(\"BET 1900 AND 2000\") < DateValue.parse(\"FROM 1920 TO 1999\")) #", "JAN 2000 AND 1 JAN 2001\")) self.assertTrue(DateValue.parse(\"FROM 1 JAN 2000", "def visitFrench(self, date): if not isinstance(date, FrenchDate): raise TypeError(str(type(date))) return", "1 JAN 2001\")) self.assertTrue(DateValue.parse(\"FROM 1 JAN 1999 TO 1 JAN", "AND 1 JAN 2000\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2000, \"DEC\", 31),", "date): if not isinstance(date, DateValuePeriod): raise TypeError(str(type(date))) return (\"period\", date.date1,", "self.assertEqual(date.date, GregorianDate(1967, bc=True)) self.assertEqual(str(date), \"1967 B.C.\") date = DateValue.parse(\"@#DGREGORIAN@ 1", "DateValueAfter(date1).accept(visitor) self.assertEqual(value, (\"after\", date1)) value = DateValueRange(date1, date2).accept(visitor) self.assertEqual(value, (\"range\",", "!= GregorianDate(2017, \"JAN\", 2)) # missing day compares as \"past\"", "= DateValue.parse(\"TO 1 JAN 2017\") self.assertIsInstance(date, DateValueTo) self.assertEqual(date.kind, DateValueTypes.TO) 
self.assertEqual(date.date,", "self.assertEqual(value, (\"before\", date1)) value = DateValueAfter(date1).accept(visitor) self.assertEqual(value, (\"after\", date1)) value", "\"OCT\", 5)) self.assertTrue(JulianDate(1582, \"OCT\", 6) > GregorianDate(1582, \"OCT\", 15)) self.assertTrue(GregorianDate(2000,", "raise TypeError(str(type(date))) return (\"period\", date.date1, date.date2) def visitFrom(self, date): if", "2017) self.assertIsNone(date.dual_year) self.assertTrue(date.bc) self.assertEqual(date.year_str, \"2017 B.C.\") self.assertEqual(date.month, \"OCT\") self.assertEqual(date.month_num, 10)", "not isinstance(date, DateValuePeriod): raise TypeError(str(type(date))) return (\"period\", date.date1, date.date2) def", "AND 1 JAN 2001\")) self.assertTrue(DateValue.parse(\"1 JAN 2000\") > DateValue.parse(\"BEF 1", "to 1 apr 2000\") self.assertIsInstance(date, DateValuePeriod) self.assertEqual(date.kind, DateValueTypes.PERIOD) self.assertEqual(date.date1, GregorianDate(1920,", "\"APR\")) self.assertEqual(str(date), \"BETWEEN MAR 1920 AND APR 2000\") def test_014_date_parse_approx(self):", "100) self.assertTrue(date.bc) self.assertIsNone(date.month) self.assertIsNone(date.month_num) self.assertIsNone(date.day) self.assertEqual(date.original, \"@#DJULIAN@ 100 B.C.\") self.assertEqual(date.calendar,", "isinstance(date, DateValueBefore): raise TypeError(str(type(date))) return (\"before\", date.date) def visitAfter(self, date):", "31), GregorianDate(2001, \"JAN\", 1))) # order of dates is messed", "\"OCT\", 5) self.assertEqual(str(date), \"@#DJULIAN@ 5 OCT 1582\") def test_005_cal_date_parse(self): \"\"\"Test", "FrenchDate(2017, \"VENT\", bc=True) self.assertEqual(date.key(), (1638959.5, 1)) date = HebrewDate(2017, \"TSH\",", "HebrewDate(5780, \"SVN\", 4)) def test_004_cal_date_str(self): \"\"\"Test date.CalendarDate class.\"\"\" date =", "\"9 OCT 2017\") date = GregorianDate(2017, \"OCT\", bc=True) self.assertEqual(str(date), \"OCT", "1698) self.assertEqual(date.dual_year, 1699) self.assertFalse(date.bc) self.assertEqual(date.month, \"MAR\") self.assertEqual(date.month_num, 3) self.assertEqual(date.day, 10)", "1967 B.C.\") date = DateValue.parse(\"AFT 1 JAN 2017\") self.assertIsInstance(date, DateValueAfter)", "self.assertIsInstance(date, JulianDate) self.assertEqual(date.year, 100) self.assertTrue(date.bc) self.assertIsNone(date.month) self.assertIsNone(date.month_num) self.assertIsNone(date.day) self.assertEqual(date.original, \"@#DJULIAN@", "self.assertEqual(date.date2, GregorianDate(2000, \"APR\", 1)) self.assertEqual(str(date), \"FROM MAR 1920 TO 1", "\"OCT\") self.assertEqual(date.month_num, 10) self.assertEqual(date.day, 9) self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = GregorianDate(2017,", "return (\"calculated\", date.date) def visitEstimated(self, date): if not isinstance(date, DateValueEstimated):", "bc=True), \"JAN 2017\": GregorianDate(2017, \"JAN\"), \"31 JAN 2017\": GregorianDate(2017, \"JAN\",", "8) self.assertEqual(date.day, 7) self.assertEqual(date.original, \"@#DHEBREW@ 7 NSN 5000\") self.assertEqual(date.calendar, CalendarType.HEBREW)", "= GregorianDate(1699, \"JAN\", 1, dual_year=1700) self.assertEqual(str(date), \"1 JAN 1699/00\") date", "self.assertTrue(GregorianDate(1792, \"SEP\", 23) > FrenchDate(1, \"VEND\", 1)) self.assertTrue(FrenchDate(1, \"VEND\", 2)", "MAR 1920 TO 1 APR 2000\") def test_013_date_parse_range(self): \"\"\"Test date.DateValue", "DateValueAbout, DateValueTypes.ABOUT), (\"CAL\", \"CALCULATED\", DateValueCalculated, 
DateValueTypes.CALCULATED), (\"EST\", \"ESTIMATED\", DateValueEstimated, DateValueTypes.ESTIMATED)", "date.date) def visitPeriod(self, date): if not isinstance(date, DateValuePeriod): raise TypeError(str(type(date)))", "self.assertEqual(date.day, 7) self.assertEqual(date.original, \"@#DHEBREW@ 7 NSN 5000\") self.assertEqual(date.calendar, CalendarType.HEBREW) #", "self.assertTrue(DateValue.parse(\"1 JAN 2000\") < DateValue.parse(\"BET 1 JAN 2000 AND 1", "= FrenchDate(1, \"VEND\", 1) value = date.accept(visitor) self.assertEqual(value, (\"french\", date))", "return (\"range\", date.date1, date.date2) def visitBefore(self, date): if not isinstance(date,", "date1, date2)) value = DateValueAbout(date1).accept(visitor) self.assertEqual(value, (\"about\", date1)) value =", "dates self.assertTrue(GregorianDate(1582, \"OCT\", 15) == JulianDate(1582, \"OCT\", 5)) self.assertTrue(GregorianDate(1582, \"OCT\",", "B.C.\": GregorianDate(500, bc=True), \"JAN 2017\": GregorianDate(2017, \"JAN\"), \"31 JAN 2017\":", "1)) self.assertTrue(GregorianDate(2017, \"JAN\", 1) == GregorianDate(2017, \"JAN\", 1)) self.assertTrue(GregorianDate(2017, \"JAN\",", "1967\") date = DateValue.parse(\"TO 1 JAN 2017\") self.assertIsInstance(date, DateValueTo) self.assertEqual(date.kind,", "of year, but before next year self.assertTrue(GregorianDate(2017) > GregorianDate(2017, \"DEC\",", "MAR 1699/00\") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 1699) self.assertEqual(date.dual_year, 1700) self.assertEqual(date.original, \"10", "+ datestr) self.assertEqual(date.date, value) def test_015_date_parse_phrase(self): \"\"\"Test date.DateValue class.\"\"\" date", "2000 TO 1 JAN 2001\"), DateValue.parse(\"BET 1 JAN 2000 AND", "\"FROM 1920 TO 2000\") date = DateValue.parse(\"from mar 1920 to", "GregorianDate(2017, \"JAN\"), \"31 JAN 2017\": GregorianDate(2017, \"JAN\", 31)} approx =", "DateValue.parse(\"TO 1 JAN 2000\")) self.assertTrue(DateValue.parse(\"1 JAN 2000\") < DateValue.parse(\"AFT 1", "\"@#DJULIAN@ 100 B.C.\") self.assertEqual(date.calendar, CalendarType.JULIAN) date = CalendarDate.parse(\"@#DFRENCH R@ 15", "1 JAN 2001\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2000, \"DEC\", 31), GregorianDate(2001,", "DateValueTypes.SIMPLE) self.assertEqual(date.date, GregorianDate(1967, bc=True)) self.assertEqual(str(date), \"1967 B.C.\") date = DateValue.parse(\"@#DGREGORIAN@", "class TestDateVisitor(CalendarDateVisitor, DateValueVisitor): def visitGregorian(self, date): if not isinstance(date, GregorianDate):", "compares as \"past\" the last day of month, but before", "specific date compares later than more specific self.assertTrue(DateValue.parse(\"2000\") > DateValue.parse(\"31", "if not isinstance(date, DateValueRange): raise TypeError(str(type(date))) return (\"range\", date.date1, date.date2)", "\"FROM 1967\") date = DateValue.parse(\"TO 1 JAN 2017\") self.assertIsInstance(date, DateValueTo)", "2001\")) self.assertTrue(DateValue.parse(\"FROM 1 JAN 2000 TO 1 JAN 2002\") >", "cannot handle ROMAN with self.assertRaises(ValueError): date = CalendarDate.parse(\"@#DROMAN@ 2020\") #", "DateValueTypes.BEFORE) self.assertEqual(date.date, GregorianDate(1967, bc=True)) self.assertEqual(str(date), \"BEFORE 1967 B.C.\") date =", "approx: for datestr, value in dates.items(): date = DateValue.parse(appr +", "date.date) def visitEstimated(self, date): if not isinstance(date, DateValueEstimated): raise TypeError(str(type(date)))", "1)) self.assertEqual(str(date), \"FROM MAR 1920 
TO 1 APR 2000\") def", "date = GregorianDate(1699, \"FEB\", dual_year=1700) self.assertEqual(date.year, 1699) self.assertEqual(date.dual_year, 1700) self.assertFalse(date.bc)", "\"OCT\", 9) self.assertEqual(date.year, 2017) self.assertIsNone(date.dual_year) self.assertFalse(date.bc) self.assertEqual(date.year_str, \"2017\") self.assertEqual(date.month, \"OCT\")", "DateValuePhrase): raise TypeError(str(type(date))) return (\"phrase\", date.phrase) class TestDetailDate(unittest.TestCase): \"\"\"Tests for", "GregorianDate(2017, \"JAN\", 31)) self.assertTrue(GregorianDate(2017, \"JAN\") < GregorianDate(2017, \"FEB\", 1)) #", "> DateValue.parse(\"31 DEC 2000\")) # phrase is always later than", "5000) self.assertFalse(date.bc) self.assertEqual(date.month, \"NSN\") self.assertEqual(date.month_num, 8) self.assertEqual(date.day, 7) self.assertEqual(date.original, \"@#DHEBREW@", "import ( DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted,", "visitFrench(self, date): if not isinstance(date, FrenchDate): raise TypeError(str(type(date))) return (\"french\",", "DateValuePhrase(\"phrase\").accept(visitor) self.assertEqual(value, (\"phrase\", \"phrase\")) def test_020_date_hash(self): \"\"\"Test date.Date hash\"\"\" dv1", "self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = GregorianDate(1699, \"FEB\", dual_year=1700) self.assertEqual(date.year, 1699)", "\"VEND\", 1)), hash(FrenchDate(1, \"VEND\", 1))) self.assertEqual(hash(FrenchDate(1)), hash(FrenchDate(1))) def test_010_date_no_date(self): \"\"\"Test", "test_001_cal_date(self): \"\"\"Test date.CalendarDate class.\"\"\" date = GregorianDate(2017, \"OCT\", 9) self.assertEqual(date.year,", "2000 AND 1 JAN 2001\")) # Less specific date compares", "as \"past\" the last day of month, but before next", "date = JulianDate(1582, \"OCT\", 5) value = date.accept(visitor) self.assertEqual(value, (\"julian\",", "date = DateValue.parse(\"BET @#DJULIAN@ 1600 AND 2000\") self.assertIsInstance(date, DateValueRange) self.assertEqual(date.kind,", "(\"hebrew\", date)) date = FrenchDate(1, \"VEND\", 1) value = date.accept(visitor)", "\"OCT\", 9)), hash(GregorianDate(2017, \"OCT\", 9))) self.assertEqual(hash(GregorianDate(2017, \"OCT\", 9, bc=True)), hash(GregorianDate(2017,", "def test_010_date_no_date(self): \"\"\"Test date.DateValue class.\"\"\" date = DateValue.parse(\"not a date\")", "bc=True) self.assertEqual(date.year, 5) self.assertTrue(date.bc) self.assertEqual(date.year_str, \"5 B.C.\") self.assertEqual(date.month, \"JAN\") self.assertEqual(date.month_num,", "= GregorianDate(1699, \"FEB\", 1, dual_year=1700) self.assertEqual(date.key(), (2342003.5, 0)) date =", "date = CalendarDate.parse(\"@#DROMAN@ 2020\") # cannot handle UNKNOWN with self.assertRaises(ValueError):", "HebrewDate): raise TypeError(str(type(date))) return (\"hebrew\", date) def visitFrench(self, date): if", "1)) # compare Gregorian and French dates self.assertTrue(GregorianDate(1792, \"SEP\", 22)", "value = DateValueInterpreted(date1, \"phrase\").accept(visitor) self.assertEqual(value, (\"interpreted\", date1, \"phrase\")) value =", "= DateValue.parse(\"AFT 1 JAN 2017\") self.assertIsInstance(date, DateValueAfter) self.assertEqual(date.kind, DateValueTypes.AFTER) self.assertEqual(date.date,", "AND 1 JAN 2001\")) self.assertTrue(DateValue.parse(\"FROM 1 JAN 1999 TO 1", "self.assertEqual(value, (\"about\", date1)) value = DateValueCalculated(date1).accept(visitor) 
self.assertEqual(value, (\"calculated\", date1)) value", "def test_012_date_parse_period(self): \"\"\"Test date.DateValue class.\"\"\" date = DateValue.parse(\"FROM 1967\") self.assertIsInstance(date,", "\"\"\"Test date.DateValue class.\"\"\" visitor = TestDateVisitor() date1 = GregorianDate(2017, \"JAN\",", "date = JulianDate(1000) self.assertEqual(date.key(), (2086672.5, 1)) def test_003_cal_date_cmp(self): \"\"\"Test date.CalendarDate", "\"\"\"Test date.DateValue class.\"\"\" date = DateValue.parse(\"not a date\") self.assertIsInstance(date, DateValuePhrase)", "self.assertEqual(date.date, GregorianDate(1967, bc=True)) self.assertEqual(str(date), \"BEFORE 1967 B.C.\") date = DateValue.parse(\"AFT", "(\"about\", date1)) value = DateValueCalculated(date1).accept(visitor) self.assertEqual(value, (\"calculated\", date1)) value =", "\"\"\"Test date.CalendarDate class.\"\"\" date = GregorianDate(2017, \"OCT\", 9) self.assertEqual(date.key(), (2458035.5,", "JAN 2000\") > DateValue.parse(\"TO 1 JAN 2000\")) self.assertTrue(DateValue.parse(\"1 JAN 2000\")", "date): if not isinstance(date, DateValueAfter): raise TypeError(str(type(date))) return (\"after\", date.date)", "DateValueInterpreted): raise TypeError(str(type(date))) return (\"interpreted\", date.date, date.phrase) def visitPhrase(self, date):", "self.assertEqual(date.date, GregorianDate(2017, \"JAN\", 1)) self.assertEqual(str(date), \"TO 1 JAN 2017\") date", "DateValue.parse(\"bet mar 1920 and apr 2000\") self.assertIsInstance(date, DateValueRange) self.assertEqual(date.kind, DateValueTypes.RANGE)", "GERM 0001\") self.assertEqual(date.calendar, CalendarType.FRENCH_R) date = CalendarDate.parse(\"@#DHEBREW@ 7 NSN 5000\")", "1699) self.assertEqual(date.dual_year, 1700) self.assertFalse(date.bc) self.assertEqual(date.year_str, \"1699/00\") self.assertEqual(date.month, \"FEB\") self.assertEqual(date.month_num, 2)", "\"FROM MAR 1920 TO 1 APR 2000\") def test_013_date_parse_range(self): \"\"\"Test", "self.assertFalse(date.bc) self.assertEqual(date.year_str, \"1\") self.assertEqual(date.month, \"FRUC\") self.assertEqual(date.month_num, 12) self.assertEqual(date.day, 1) self.assertEqual(date.calendar,", "class.\"\"\" dv = DateValue.parse(\"2016\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2016), GregorianDate(2016))) dv", "self.assertEqual(date.date2, GregorianDate(2000)) self.assertEqual(str(date), \"FROM 1920 TO 2000\") date = DateValue.parse(\"from", "DateValuePhrase) self.assertEqual(date.kind, DateValueTypes.PHRASE) self.assertIsNone(date.phrase) self.assertEqual(str(date), \"\") def test_019_date_value_visitor(self): \"\"\"Test date.DateValue", "\"OCT\", 9) self.assertEqual(date.key(), (2458035.5, 0)) date = GregorianDate(1699, \"FEB\", 1,", "approx = [ (\"ABT\", \"ABOUT\", DateValueAbout, DateValueTypes.ABOUT), (\"CAL\", \"CALCULATED\", DateValueCalculated,", "= GregorianDate(2017, \"OCT\", bc=True) self.assertEqual(date.year, 2017) self.assertIsNone(date.dual_year) self.assertTrue(date.bc) self.assertEqual(date.year_str, \"2017", "\"2017 B.C.\") self.assertEqual(date.month, \"OCT\") self.assertEqual(date.month_num, 10) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.GREGORIAN) date", "date.DateValue class.\"\"\" date = DateValue.parse(\"FROM 1967\") self.assertIsInstance(date, DateValueFrom) self.assertEqual(date.kind, DateValueTypes.FROM)", "\"JAN\", 1)) self.assertEqual(date.phrase, \"some phrase\") self.assertEqual(str(date), \"INTERPRETED 1 JAN 2017", "\"past\" the last 
day of month, but before next month", "CalendarDate.parse(\"start of time\") def test_006_cal_date_visitor(self): \"\"\"Test date.CalendarDate.accept method.\"\"\" visitor =", "self.assertEqual(date.kind, DateValueTypes.AFTER) self.assertEqual(date.date, GregorianDate(2017, \"JAN\", 1)) self.assertEqual(str(date), \"AFTER 1 JAN", "2000\") > DateValue.parse(\"31 DEC 2000\")) # phrase is always later", "date = DateValue.parse(value) self.assertIsInstance(date, DateValuePhrase) self.assertEqual(date.kind, DateValueTypes.PHRASE) self.assertIsNone(date.phrase) self.assertEqual(str(date), \"\")", "visitEstimated(self, date): if not isinstance(date, DateValueEstimated): raise TypeError(str(type(date))) return (\"estimated\",", "compare Gregorian and French dates self.assertTrue(GregorianDate(1792, \"SEP\", 22) == FrenchDate(1,", "GregorianDate(2000)) self.assertEqual(str(date), \"FROM 1920 TO 2000\") date = DateValue.parse(\"from mar", "return (\"period\", date.date1, date.date2) def visitFrom(self, date): if not isinstance(date,", "self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.JULIAN) def test_002_cal_date_key(self): \"\"\"Test date.CalendarDate class.\"\"\" date =", "date.date, date.phrase) def visitPhrase(self, date): if not isinstance(date, DateValuePhrase): raise", "2001\")) self.assertTrue(DateValue.parse(\"1 JAN 2000\") > DateValue.parse(\"BEF 1 JAN 2000\")) self.assertTrue(DateValue.parse(\"1", "test_017_date_cmp(self): \"\"\"Test date.Date class.\"\"\" dv = DateValue.parse(\"2016\") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(),", "JAN 2000 TO 1 JAN 2002\") > DateValue.parse(\"BET 1 JAN", "ROMAN with self.assertRaises(ValueError): date = CalendarDate.parse(\"@#DROMAN@ 2020\") # cannot handle", "but before next month self.assertTrue(GregorianDate(2017, \"JAN\") > GregorianDate(2017, \"JAN\", 31))", "self.assertTrue(DateValue.parse(\"2 JAN 2016\") > DateValue.parse(\"1 JAN 2016\")) self.assertTrue(DateValue.parse(\"BET 1900 AND", "@#DGREGORIAN@ 1 JAN 2017 (some phrase)\") self.assertIsInstance(date, DateValueInterpreted) self.assertEqual(date.kind, DateValueTypes.INTERPRETED)", "= DateValueEstimated(date1).accept(visitor) self.assertEqual(value, (\"estimated\", date1)) value = DateValueInterpreted(date1, \"phrase\").accept(visitor) self.assertEqual(value,", "self.assertTrue(DateValue.parse(\"1 JAN 2000\") > DateValue.parse(\"BEF 1 JAN 2000\")) self.assertTrue(DateValue.parse(\"1 JAN", "2001\")) self.assertTrue(DateValue.parse(\"1 JAN 2000\") < DateValue.parse(\"BET 1 JAN 2000 AND", "JAN 2017 (some phrase)\") def test_016_date_parse_simple(self): \"\"\"Test date.DateValue class.\"\"\" date", "DateValueTypes.PHRASE) self.assertEqual(date.phrase, \"not a date\") self.assertEqual(str(date), \"(not a date)\") def", "self.assertEqual(value, (\"from\", date1)) value = DateValueTo(date1).accept(visitor) self.assertEqual(value, (\"to\", date1)) value", "self.assertEqual(date.month_num, 2) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = HebrewDate(5000) self.assertEqual(date.year, 5000)", "1 JAN 2000 TO 1 JAN 2002\") > DateValue.parse(\"BET 1", "self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = GregorianDate(1699, \"FEB\", dual_year=1700) self.assertEqual(date.year, 1699) self.assertEqual(date.dual_year,", "\"SVN\", 4)) def test_004_cal_date_str(self): \"\"\"Test date.CalendarDate class.\"\"\" date = GregorianDate(2017,", "10 MAR 1698/99\") self.assertIsInstance(date, GregorianDate) 
"""Unit tests for `ged4py.date` module."""

import unittest

from ged4py.calendar import (
    CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate,
    JulianDate, CalendarDateVisitor
)
from ged4py.date import (
    DateValue, DateValueAbout, DateValueAfter, DateValueBefore,
    DateValueCalculated, DateValueEstimated, DateValueFrom,
    DateValueInterpreted, DateValuePeriod, DateValuePhrase,
    DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor
)


class TestDateVisitor(CalendarDateVisitor, DateValueVisitor):
    """Visitor that returns a tag tuple for every visited date type."""

    def visitJulian(self, date):
        if not isinstance(date, JulianDate):
            raise TypeError(str(type(date)))
        return ("julian", date)

    def visitHebrew(self, date):
        if not isinstance(date, HebrewDate):
            raise TypeError(str(type(date)))
        return ("hebrew", date)

    # ... Gregorian and French visit methods follow the same pattern

    def visitFrom(self, date):
        if not isinstance(date, DateValueFrom):
            raise TypeError(str(type(date)))
        return ("from", date.date)

    def visitTo(self, date):
        if not isinstance(date, DateValueTo):
            raise TypeError(str(type(date)))
        return ("to", date.date)

    def visitAbout(self, date):
        if not isinstance(date, DateValueAbout):
            raise TypeError(str(type(date)))
        return ("about", date.date)

    def visitCalculated(self, date):
        if not isinstance(date, DateValueCalculated):
            raise TypeError(str(type(date)))
        return ("calculated", date.date)

    def visitPeriod(self, date):
        if not isinstance(date, DateValuePeriod):
            raise TypeError(str(type(date)))
        return ("period", date.date1, date.date2)

    def visitInterpreted(self, date):
        if not isinstance(date, DateValueInterpreted):
            raise TypeError(str(type(date)))
        return ("interpreted", date.date, date.phrase)

    def visitPhrase(self, date):
        if not isinstance(date, DateValuePhrase):
            raise TypeError(str(type(date)))
        return ("phrase", date.phrase)

    # ... remaining visit methods elided


class TestDate(unittest.TestCase):

    def test_002_cal_date_str(self):
        """Test date.CalendarDate class."""
        date = GregorianDate(2017, "OCT", 9)
        self.assertEqual(str(date), "9 OCT 2017")
        date = GregorianDate(2017, "OCT", bc=True)
        self.assertEqual(str(date), "OCT 2017 B.C.")
        date = GregorianDate(1699, "JAN", 1, dual_year=1700)
        self.assertEqual(str(date), "1 JAN 1699/00")

    def test_003_cal_date_cmp(self):
        """Test date.CalendarDate class."""
        self.assertTrue(GregorianDate(2016, "JAN", 1) < GregorianDate(2017, "JAN", 1))
        self.assertTrue(GregorianDate(2017, "JAN", 1) < GregorianDate(2017, "JAN", 2))
        self.assertTrue(GregorianDate(2017, "JAN", 1) <= GregorianDate(2017, "JAN", 31))
        self.assertTrue(GregorianDate(2017, "JAN") < GregorianDate(2017, "FEB", 1))
        # missing month compares as "past" the last day of year, but
        # before next year
        # ...
        # compare Gregorian and Julian dates
        self.assertTrue(GregorianDate(1582, "OCT", 15) == JulianDate(1582, "OCT", 5))
        self.assertTrue(GregorianDate(2000, "JAN", 14) == JulianDate(2000, "JAN", 1))
        # compare Gregorian and French dates
        self.assertTrue(FrenchDate(1, "VEND", 2) > GregorianDate(1792, "SEP", 22))
        # compare Gregorian and Hebrew dates
        # ...

    def test_004_cal_date_parse(self):
        """Test date.CalendarDate.parse method."""
        date = CalendarDate.parse("@#DHEBREW@ 7 NSN 5000")
        self.assertIsInstance(date, HebrewDate)
        self.assertEqual(date.calendar, CalendarType.HEBREW)
        self.assertEqual(date.month, "NSN")
        self.assertEqual(date.month_num, 8)
        self.assertEqual(date.day, 7)
        self.assertEqual(date.original, "@#DHEBREW@ 7 NSN 5000")

        date = CalendarDate.parse("@#DFRENCH R@ 15 GERM 0001")
        self.assertIsInstance(date, FrenchDate)
        # ...

        # cannot handle UNKNOWN
        with self.assertRaises(ValueError):
            date = CalendarDate.parse("@#DUNKNOWN@ 2020")
        # dual year only works for GREGORIAN
        with self.assertRaises(ValueError):
            date = CalendarDate.parse("@#DJULIAN@ 2020/21")
        # cannot parse nonsense
        with self.assertRaises(ValueError):
            date = CalendarDate.parse("start of ...")

    def test_006_cal_date_visitor(self):
        """Test date.CalendarDate.accept method."""
        visitor = TestDateVisitor()
        # ...
        date = JulianDate(1582, "OCT", 5)
        value = date.accept(visitor)
        self.assertEqual(value, ("julian", date))

    def test_007_cal_date_hash(self):
        """Test date.CalendarDate hash."""
        self.assertEqual(hash(GregorianDate(2017, "OCT", 9, bc=True)),
                         hash(GregorianDate(2017, "OCT", 9, bc=True)))
        self.assertEqual(hash(FrenchDate(1, "VEND", 1)),
                         hash(FrenchDate(1, "VEND", 1)))

    def test_008_cal_date_key(self):
        """Test date.CalendarDate key method."""
        date = FrenchDate(2017, "VENT", bc=True)
        self.assertEqual(date.key(), (1638959.5, 1))
        date = HebrewDate(2017)
        # ...

    def test_011_date_parse_simple(self):
        """Test date.DateValue class."""
        date = DateValue.parse("1967 B.C.")
        self.assertIsInstance(date, DateValueSimple)
        self.assertEqual(date.kind, DateValueTypes.SIMPLE)
        self.assertEqual(date.date, GregorianDate(1967, bc=True))

    def test_012_date_parse_period(self):
        """Test date.DateValue class."""
        date = DateValue.parse("from mar 1920 to 1 apr 2000")
        self.assertIsInstance(date, DateValuePeriod)
        self.assertEqual(date.kind, DateValueTypes.PERIOD)
        self.assertEqual(date.date1, GregorianDate(1920, "MAR"))
        self.assertEqual(date.date2, GregorianDate(2000, "APR", 1))
        self.assertEqual(str(date), "FROM MAR 1920 TO 1 APR 2000")

    def test_013_date_parse_range(self):
        """Test date.DateValue class."""
        date = DateValue.parse("BEF 1967B.C.")
        self.assertIsInstance(date, DateValueBefore)
        date = DateValue.parse("AFT 1 JAN 2017")
        self.assertIsInstance(date, DateValueAfter)
        self.assertEqual(str(date), "AFTER 1 JAN 2017")
        # ...

    def test_014_date_parse_approx(self):
        """Test date.DateValue class."""
        dates = {"500 B.C.": GregorianDate(500, bc=True),
                 "JAN 2017": GregorianDate(2017, "JAN")}
        approximated = [
            ("ABT", "ABOUT", DateValueAbout, DateValueTypes.ABOUT),
            ("CAL", "CALCULATED", DateValueCalculated, DateValueTypes.CALCULATED),
            ("EST", "ESTIMATED", DateValueEstimated, DateValueTypes.ESTIMATED)
        ]
        for appr, fmt, klass, typeEnum in approximated:
            for datestr, value in dates.items():
                date = DateValue.parse(appr + " " + datestr)
                self.assertIsInstance(date, klass)
                self.assertEqual(date.kind, typeEnum)
                self.assertEqual(date.date, value)

    def test_015_date_parse_phrase(self):
        """Test date.DateValue class."""
        date = DateValue.parse("(some phrase)")
        self.assertIsInstance(date, DateValuePhrase)
        self.assertEqual(date.kind, DateValueTypes.PHRASE)
        self.assertEqual(date.phrase, "some phrase")

    def test_016_date_parse_interp(self):
        """Test date.DateValue class."""
        date = DateValue.parse("INT 1967 B.C. (some phrase)")
        self.assertIsInstance(date, DateValueInterpreted)
        self.assertEqual(date.kind, DateValueTypes.INTERPRETED)
        self.assertEqual(date.date, GregorianDate(1967, bc=True))

    def test_017_date_key(self):
        """Test date.Date class."""
        dv = DateValue.parse("2016")
        self.assertIsInstance(dv.key(), tuple)
        self.assertEqual(dv.key(), (GregorianDate(2016), GregorianDate(2016)))

        dv = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001")
        self.assertIsInstance(dv.key(), tuple)
        self.assertEqual(dv.key(),
                         (GregorianDate(2000, "DEC", 31), GregorianDate(2001, "JAN", 1)))

    def test_018_date_cmp(self):
        """Test date.Date class."""
        self.assertTrue(DateValue.parse("2016") < DateValue.parse("2017"))
        self.assertTrue(DateValue.parse("1 JAN 2000") <
                        DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
        self.assertTrue(DateValue.parse("1 JAN 2000") >
                        DateValue.parse("BEF 1 JAN 2000"))
        # Less specific date compares later than more specific
        self.assertTrue(DateValue.parse("2000") > DateValue.parse("31 DEC 2000"))
        self.assertTrue(DateValue.parse("DEC 2000") > DateValue.parse("31 DEC 2000"))
        # comparing ranges
        self.assertEqual(DateValue.parse("FROM 1 JAN 2000 TO 1 JAN 2001"),
                         DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
        # ...

    def test_019_date_hash(self):
        """Test date.Date hash."""
        dv1 = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001")
        dv2 = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001")
        self.assertEqual(hash(dv1), hash(dv2))

    def test_020_date_visitor(self):
        """Test date.DateValue.accept method."""
        visitor = TestDateVisitor()
        date1 = GregorianDate(2017, "JAN", 1)
        date2 = GregorianDate(2017, "DEC", 31)
        value = DateValueFrom(date1).accept(visitor)
        self.assertEqual(value, ("from", date1))
        value = DateValueTo(date1).accept(visitor)
        self.assertEqual(value, ("to", date1))
        value = DateValuePeriod(date1, date2).accept(visitor)
        self.assertEqual(value, ("period", date1, date2))
        value = DateValueInterpreted(date1, "phrase").accept(visitor)
        self.assertEqual(value, ("interpreted", date1, "phrase"))
        value = DateValuePhrase("phrase").accept(visitor)
        self.assertEqual(value, ("phrase", "phrase"))
[ "Any) -> Any: del self._get_current_object()[key] async def __aiter__(self) -> Any:", "try: return values[name] except KeyError: raise AttributeError(name) def __setattr__(self, name:", "o # noqa: E731 __le__ = lambda x, o: x._get_current_object()", "# noqa: E731 __lshift__ = lambda x, o: x._get_current_object() <<", "x, o: x._get_current_object().__divmod__(o) # noqa: E731 __pow__ = lambda x,", "o: x._get_current_object() >= o # noqa: E731 __hash__ = lambda", "object.__setattr__(self, \"_storage\", ContextVar(\"storage\")) def __getattr__(self, name: str) -> Any: values", "RuntimeError: raise AttributeError(\"__dict__\") def __repr__(self) -> str: try: obj =", "ignore # noqa: E731 __str__ = lambda x: str(x._get_current_object()) #", "E731 __lt__ = lambda x, o: x._get_current_object() < o #", "x, o: x._get_current_object().__rdivmod__(o) # noqa: E731 __copy__ = lambda x:", "lambda x, o: o - x._get_current_object() # noqa: E731 __rmul__", "= lambda x: abs(x._get_current_object()) # noqa: E731 __invert__ = lambda", "+ x._get_current_object() # noqa: E731 __rsub__ = lambda x, o:", "def __delitem__(self, key: Any) -> Any: del self._get_current_object()[key] async def", "# noqa: E731 __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)", "x, o: x._get_current_object() | o # noqa: E731 __div__ =", "x __setattr__ = lambda x, n, v: setattr( # noqa:", "noqa: E731 __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)", "asyncio.get_event_loop() if loop.is_running(): task = asyncio.current_task() task_id = id(task) return", "noqa: E731 __contains__ = lambda x, i: i in x._get_current_object()", "__iter__ = lambda x: iter(x._get_current_object()) # noqa: E731 __contains__ =", "int(x._get_current_object()) # noqa: E731 __float__ = lambda x: float(x._get_current_object()) #", "__rtruediv__ = __rdiv__ __rfloordiv__ = lambda x, o: o //", "x._get_current_object() != o # type: ignore # noqa: E731 __gt__", "lambda x, o: o / x._get_current_object() # noqa: E731 __rtruediv__", "object.__setattr__(self, \"__wrapped__\", local) object.__setattr__(self, \"__name__\", name) def _get_current_object(self) -> Any:", "__str__ = lambda x: str(x._get_current_object()) # type: ignore # noqa:", "Any: try: return dir(self._get_current_object()) except RuntimeError: return [] def __getattr__(self,", "dir(self._get_current_object()) except RuntimeError: return [] def __getattr__(self, name: Any) ->", "# noqa: E731 __ge__ = lambda x, o: x._get_current_object() >=", "= lambda x, o: x._get_current_object() + o # noqa: E731", "x._get_current_object() # noqa: E731 __rtruediv__ = __rdiv__ __rfloordiv__ = lambda", "lambda x, o: x._get_current_object() ^ o # noqa: E731 __or__", "E731 __ge__ = lambda x, o: x._get_current_object() >= o #", "TaskLocal() def push(self, value: Any) -> None: stack = getattr(self._task_local,", "stdlib from typing import Any # noqa # contextvars not", "* x._get_current_object() # noqa: E731 __rdiv__ = lambda x, o:", "[] def __getattr__(self, name: Any) -> Any: if name ==", "__init__(self) -> None: self._task_local = TaskLocal() def push(self, value: Any)", "E731 __lshift__ = lambda x, o: x._get_current_object() << o #", "= lambda x, memo: copy.deepcopy(x._get_current_object(), memo) # noqa: E731 __await__", "x, memo: copy.deepcopy(x._get_current_object(), memo) # noqa: E731 __await__ = lambda", "= lambda x: float(x._get_current_object()) # noqa: E731 __oct__ = lambda", "# type: ignore ) __delattr__ = lambda x, n: delattr(x._get_current_object(),", "name == 
\"__members__\": return dir(self._get_current_object()) return getattr(self._get_current_object(), name) def __setitem__(self,", "E731 __add__ = lambda x, o: x._get_current_object() + o #", "x: int(x._get_current_object()) # noqa: E731 __float__ = lambda x: float(x._get_current_object())", "\"_storage\", ContextVar(\"storage\")) def __getattr__(self, name: str) -> Any: values =", "if stack is None: self._task_local.stack = stack = [] stack.append(value)", "= lambda x, n: delattr(x._get_current_object(), n) # type: ignore #", "E731 __ne__ = lambda x, o: x._get_current_object() != o #", "if name == \"__members__\": return dir(self._get_current_object()) return getattr(self._get_current_object(), name) def", "noqa: E731 __invert__ = lambda x: ~(x._get_current_object()) # noqa: E731", "Callable, name: Optional[str] = None) -> None: # Note as", "E731 __rtruediv__ = __rdiv__ __rfloordiv__ = lambda x, o: o", "__setattr__ object.__setattr__(self, \"_storage\", ContextVar(\"storage\")) def __getattr__(self, name: str) -> Any:", "getattr(self._task_local, \"stack\", None) if stack is None: self._task_local.stack = stack", "Any, value: Any) -> Any: self._get_current_object()[key] = value def __delitem__(self,", "x._get_current_object() > o # noqa: E731 __ge__ = lambda x,", "return 0 class LocalStack: def __init__(self) -> None: self._task_local =", "o: x._get_current_object() <= o # noqa: E731 __eq__ = lambda", "% o # noqa: E731 __divmod__ = lambda x, o:", "__rmod__ = lambda x, o: o % x._get_current_object() # noqa:", "lambda x, i: i in x._get_current_object() # noqa: E731 __add__", "E731 __and__ = lambda x, o: x._get_current_object() & o #", "stack == []: return None else: return stack.pop() @property def", "noqa: E731 __rsub__ = lambda x, o: o - x._get_current_object()", "self._storage.set(values) except KeyError: raise AttributeError(name) @staticmethod def _task_identity() -> int:", "pop(self) -> Any: stack = getattr(self._task_local, \"stack\", None) if stack", "__eq__ = lambda x, o: x._get_current_object() == o # type:", "= __rdiv__ __rfloordiv__ = lambda x, o: o // x._get_current_object()", "return task_id else: return 0 class LocalStack: def __init__(self) ->", "= getattr(self._task_local, \"stack\", None) if stack is None: self._task_local.stack =", "# type: ignore # noqa: E731 __str__ = lambda x:", "async def __aiter__(self) -> Any: async for x in self._get_current_object():", "= asyncio.current_task() task_id = id(task) return task_id else: return 0", "o: x._get_current_object() ^ o # noqa: E731 __or__ = lambda", "Any) -> None: stack = getattr(self._task_local, \"stack\", None) if stack", "x, n, v: setattr( # noqa: E731, E501 x._get_current_object(), n,", "lambda x, o: x._get_current_object().__rdivmod__(o) # noqa: E731 __copy__ = lambda", "x, o: x._get_current_object().__coerce__(x, o) # noqa: E731 __enter__ = lambda", "lambda x, o: x._get_current_object() > o # noqa: E731 __ge__", "def __setattr__(self, name: str, value: Any) -> None: values =", "setattr( # noqa: E731, E501 x._get_current_object(), n, v # type:", "# noqa: E731 __gt__ = lambda x, o: x._get_current_object() >", "self._get_current_object() except RuntimeError: return \"<%s unbound>\" % self.__class__.__name__ return repr(obj)", "& o # noqa: E731 __xor__ = lambda x, o:", "AttributeError(\"__dict__\") def __repr__(self) -> str: try: obj = self._get_current_object() except", "except RuntimeError: return \"<%s unbound>\" % self.__class__.__name__ return repr(obj) def", "# noqa: E731 __await__ = lambda x: 
x._get_current_object().__await__() # noqa:", "__invert__ = lambda x: ~(x._get_current_object()) # noqa: E731 __complex__ =", "None: # Note as __setattr__ is overidden below, use the", "# noqa: E731 __eq__ = lambda x, o: x._get_current_object() ==", "x, o: x._get_current_object().__truediv__(o) # noqa: E731 __neg__ = lambda x:", "__setattr__ is overidden below, use the object __setattr__ object.__setattr__(self, \"__LocalProxy_local\",", "value def __delitem__(self, key: Any) -> Any: del self._get_current_object()[key] async", "top(self) -> Any: try: return self._task_local.stack[-1] except (AttributeError, IndexError): return", "try: return self._get_current_object().__dict__ except RuntimeError: raise AttributeError(\"__dict__\") def __repr__(self) ->", "x._get_current_object()(*a, **kw) # noqa: E731 __len__ = lambda x: len(x._get_current_object())", "__getattr__(self, name: Any) -> Any: if name == \"__members__\": return", "E731 __rshift__ = lambda x, o: x._get_current_object() >> o #", "Any: values = self._storage.get({}) try: return values[name] except KeyError: raise", "object local to the current task.\"\"\" __slots__ = (\"_storage\",) def", "= lambda x: complex(x._get_current_object()) # noqa: E731 __int__ = lambda", "= lambda x, o: x._get_current_object().__truediv__(o) # noqa: E731 __neg__ =", "= (\"__dict__\", \"__local\", \"__wrapped__\") def __init__(self, local: Callable, name: Optional[str]", "x._get_current_object() # noqa: E731 __rmul__ = lambda x, o: o", "E731 __int__ = lambda x: int(x._get_current_object()) # noqa: E731 __float__", "noqa: E731 __abs__ = lambda x: abs(x._get_current_object()) # noqa: E731", "noqa: E731 __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)", "lambda x, o: x._get_current_object().__truediv__(o) # noqa: E731 __neg__ = lambda", "o # noqa: E731 __mul__ = lambda x, o: x._get_current_object()", "E731 __neg__ = lambda x: -(x._get_current_object()) # noqa: E731 __pos__", "task = asyncio.current_task() task_id = id(task) return task_id else: return", "= lambda x, o: x._get_current_object().__divmod__(o) # noqa: E731 __pow__ =", "stack is None: self._task_local.stack = stack = [] stack.append(value) def", "return self._get_current_object().__dict__ except RuntimeError: raise AttributeError(\"__dict__\") def __repr__(self) -> str:", "lambda x, o: x._get_current_object() >> o # noqa: E731 __and__", "o: x._get_current_object() ** o # noqa: E731 __lshift__ = lambda", "lambda x, o: x._get_current_object() // o # noqa: E731 __mod__", "def _get_current_object(self) -> Any: return object.__getattribute__(self, \"__LocalProxy_local\")() @property def __dict__(self)", "None: self._task_local = TaskLocal() def push(self, value: Any) -> None:", "~(x._get_current_object()) # noqa: E731 __complex__ = lambda x: complex(x._get_current_object()) #", "__slots__ = (\"__dict__\", \"__local\", \"__wrapped__\") def __init__(self, local: Callable, name:", "i in x._get_current_object() # noqa: E731 __add__ = lambda x,", "= lambda x: str(x._get_current_object()) # type: ignore # noqa: E731", "Any: self._get_current_object()[key] = value def __delitem__(self, key: Any) -> Any:", "Any: if name == \"__members__\": return dir(self._get_current_object()) return getattr(self._get_current_object(), name)", "__delitem__(self, key: Any) -> Any: del self._get_current_object()[key] async def __aiter__(self)", "x: +(x._get_current_object()) # noqa: E731 __abs__ = lambda x: abs(x._get_current_object())", "**kw: x._get_current_object().__exit__(*a, **kw) # noqa: E731 __radd__ 
= lambda x,", "noqa: E731 __enter__ = lambda x: x._get_current_object().__enter__() # noqa: E731", "= self._storage.get({}) try: return values[name] except KeyError: raise AttributeError(name) def", "E731 __mod__ = lambda x, o: x._get_current_object() % o #", "# noqa: E731 __pos__ = lambda x: +(x._get_current_object()) # noqa:", "E731 __rmod__ = lambda x, o: o % x._get_current_object() #", "__rfloordiv__ = lambda x, o: o // x._get_current_object() # noqa:", "E731 __le__ = lambda x, o: x._get_current_object() <= o #", "-> None: stack = getattr(self._task_local, \"stack\", None) if stack is", "stack is None or stack == []: return None else:", "= lambda x, o: x._get_current_object() != o # type: ignore", "x._get_current_object() # noqa: E731 __rmod__ = lambda x, o: o", "noqa: E731 __and__ = lambda x, o: x._get_current_object() & o", "E731 __rdiv__ = lambda x, o: o / x._get_current_object() #", "type: ignore # noqa: E731 __ne__ = lambda x, o:", "def __getattr__(self, name: str) -> Any: values = self._storage.get({}) try:", "-> None: values = self._storage.get({}) values[name] = value self._storage.set(values) def", "[] stack.append(value) def pop(self) -> Any: stack = getattr(self._task_local, \"stack\",", "E731 __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw) #", "stdlib from typing import Callable, Dict, Optional class TaskLocal: \"\"\"An", "x._get_current_object() < o # noqa: E731 __le__ = lambda x,", "__radd__ = lambda x, o: o + x._get_current_object() # noqa:", "__repr__(self) -> str: try: obj = self._get_current_object() except RuntimeError: return", "unbound>\" % self.__class__.__name__ return repr(obj) def __bool__(self) -> bool: try:", "__neg__ = lambda x: -(x._get_current_object()) # noqa: E731 __pos__ =", "o: x._get_current_object() // o # noqa: E731 __mod__ = lambda", "x, o: x._get_current_object() // o # noqa: E731 __mod__ =", "# type: ignore # noqa: E731 __call__ = lambda x,", "# noqa: E731 __getitem__ = lambda x, i: x._get_current_object()[i] #", "def __getattr__(self, name: Any) -> Any: if name == \"__members__\":", "x._get_current_object() == o # type: ignore # noqa: E731 __ne__", "is overidden below, use the object __setattr__ object.__setattr__(self, \"__LocalProxy_local\", local)", "o # noqa: E731 __ge__ = lambda x, o: x._get_current_object()", "o: x._get_current_object() >> o # noqa: E731 __and__ = lambda", "lambda x: float(x._get_current_object()) # noqa: E731 __oct__ = lambda x:", "# noqa: E731 __int__ = lambda x: int(x._get_current_object()) # noqa:", "-> Dict[str, Any]: # type: ignore try: return self._get_current_object().__dict__ except", "__coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o) # noqa: E731", "x, o: o // x._get_current_object() # noqa: E731 __rmod__ =", "-> Any: del self._get_current_object()[key] async def __aiter__(self) -> Any: async", "below, use the object __setattr__ object.__setattr__(self, \"__LocalProxy_local\", local) object.__setattr__(self, \"__wrapped__\",", "-> str: try: obj = self._get_current_object() except RuntimeError: return \"<%s", "_task_identity() -> int: loop = asyncio.get_event_loop() if loop.is_running(): task =", "# noqa: E731 __call__ = lambda x, *a, **kw: x._get_current_object()(*a,", "complex(x._get_current_object()) # noqa: E731 __int__ = lambda x: int(x._get_current_object()) #", "local: Callable, name: Optional[str] = None) -> None: # Note", "None: values = self._storage.get({}) values[name] = value self._storage.set(values) def __delattr__(self,", "except KeyError: raise 
AttributeError(name) def __setattr__(self, name: str, value: Any)", "lambda x, o: x._get_current_object() - o # noqa: E731 __mul__", "__pow__ = lambda x, o: x._get_current_object() ** o # noqa:", "self._get_current_object()[key] = value def __delitem__(self, key: Any) -> Any: del", "o # noqa: E731 __or__ = lambda x, o: x._get_current_object()", "ContextVar(\"storage\")) def __getattr__(self, name: str) -> Any: values = self._storage.get({})", "__rdiv__ = lambda x, o: o / x._get_current_object() # noqa:", "noqa: E731 __eq__ = lambda x, o: x._get_current_object() == o", "str(x._get_current_object()) # type: ignore # noqa: E731 __lt__ = lambda", "ignore # noqa: E731 __call__ = lambda x, *a, **kw:", "i: x._get_current_object()[i] # noqa: E731 __iter__ = lambda x: iter(x._get_current_object())", "noqa: E731 __deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo) #", "= lambda x, o: x._get_current_object().__rdivmod__(o) # noqa: E731 __copy__ =", "try: return dir(self._get_current_object()) except RuntimeError: return [] def __getattr__(self, name:", "Any: stack = getattr(self._task_local, \"stack\", None) if stack is None", "name: Any) -> Any: if name == \"__members__\": return dir(self._get_current_object())", "noqa: E731 __await__ = lambda x: x._get_current_object().__await__() # noqa: E731", "\"__members__\": return dir(self._get_current_object()) return getattr(self._get_current_object(), name) def __setitem__(self, key: Any,", "| o # noqa: E731 __div__ = lambda x, o:", "None: self._task_local.stack = stack = [] stack.append(value) def pop(self) ->", "x: hash(x._get_current_object()) # type: ignore # noqa: E731 __call__ =", "= lambda x: x._get_current_object().__enter__() # noqa: E731 __exit__ = lambda", "noqa: E731 __pow__ = lambda x, o: x._get_current_object() ** o", "type: ignore # noqa: E731 __gt__ = lambda x, o:", "v # type: ignore ) __delattr__ = lambda x, n:", "is None: self._task_local.stack = stack = [] stack.append(value) def pop(self)", "# noqa: E731 __rdiv__ = lambda x, o: o /", "E731 __pow__ = lambda x, o: x._get_current_object() ** o #", "None or stack == []: return None else: return stack.pop()", "lambda x, o: x._get_current_object() <= o # noqa: E731 __eq__", "# noqa: E731 __copy__ = lambda x: copy.copy(x._get_current_object()) # noqa:", "x: -(x._get_current_object()) # noqa: E731 __pos__ = lambda x: +(x._get_current_object())", "__lt__ = lambda x, o: x._get_current_object() < o # noqa:", "Dict, Optional class TaskLocal: \"\"\"An object local to the current", "loop = asyncio.get_event_loop() if loop.is_running(): task = asyncio.current_task() task_id =", "# noqa: E731 __abs__ = lambda x: abs(x._get_current_object()) # noqa:", "use the object __setattr__ object.__setattr__(self, \"_storage\", ContextVar(\"storage\")) def __getattr__(self, name:", "try: del values[name] self._storage.set(values) except KeyError: raise AttributeError(name) @staticmethod def", "# noqa: E731 __divmod__ = lambda x, o: x._get_current_object().__divmod__(o) #", "return None else: return stack.pop() @property def top(self) -> Any:", "# noqa: E731 __rshift__ = lambda x, o: x._get_current_object() >>", "# noqa: E731 __rmod__ = lambda x, o: o %", "x, *a, **kw: x._get_current_object().__exit__(*a, **kw) # noqa: E731 __radd__ =", "AttributeError(name) def __setattr__(self, name: str, value: Any) -> None: values", "values = self._storage.get({}) try: del values[name] self._storage.set(values) except KeyError: raise", "delattr(x._get_current_object(), n) # type: ignore # noqa: E731 
__str__ =", "o # type: ignore # noqa: E731 __gt__ = lambda", "except KeyError: raise AttributeError(name) @staticmethod def _task_identity() -> int: loop", "__add__ = lambda x, o: x._get_current_object() + o # noqa:", "task_id else: return 0 class LocalStack: def __init__(self) -> None:", "# noqa: E731 __add__ = lambda x, o: x._get_current_object() +", "v: setattr( # noqa: E731, E501 x._get_current_object(), n, v #", "# noqa: E731 __ne__ = lambda x, o: x._get_current_object() !=", "** o # noqa: E731 __lshift__ = lambda x, o:", "= value self._storage.set(values) def __delattr__(self, name: str) -> None: values", "obj = self._get_current_object() except RuntimeError: return \"<%s unbound>\" % self.__class__.__name__", "x, o: x._get_current_object().__div__(o) # noqa: E731 __truediv__ = lambda x,", "E731 __rsub__ = lambda x, o: o - x._get_current_object() #", "= lambda x: x._get_current_object().__index__() # noqa: E731 __coerce__ = lambda", "[]: return None else: return stack.pop() @property def top(self) ->", "local) object.__setattr__(self, \"__name__\", name) def _get_current_object(self) -> Any: return object.__getattribute__(self,", "# noqa: E731 __div__ = lambda x, o: x._get_current_object().__div__(o) #", "= lambda x, o: x._get_current_object() == o # type: ignore", "__or__ = lambda x, o: x._get_current_object() | o # noqa:", "-> Any: self._get_current_object()[key] = value def __delitem__(self, key: Any) ->", "o # noqa: E731 __sub__ = lambda x, o: x._get_current_object()", "lambda x: copy.copy(x._get_current_object()) # noqa: E731 __deepcopy__ = lambda x,", "E731 __enter__ = lambda x: x._get_current_object().__enter__() # noqa: E731 __exit__", "x._get_current_object(), n, v # type: ignore ) __delattr__ = lambda", "o: x._get_current_object().__divmod__(o) # noqa: E731 __pow__ = lambda x, o:", "contextvars not understood as stdlib from typing import Any #", "self._storage.get({}) values[name] = value self._storage.set(values) def __delattr__(self, name: str) ->", "__xor__ = lambda x, o: x._get_current_object() ^ o # noqa:", "a task local object.\"\"\" __slots__ = (\"__dict__\", \"__local\", \"__wrapped__\") def", "noqa: E731 __neg__ = lambda x: -(x._get_current_object()) # noqa: E731", "x: complex(x._get_current_object()) # noqa: E731 __int__ = lambda x: int(x._get_current_object())", "__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw) # noqa:", "__future__ import annotations import asyncio import copy from contextvars import", "= lambda x, o: o / x._get_current_object() # noqa: E731", "# noqa: E731 __rmul__ = lambda x, o: o *", "try: obj = self._get_current_object() except RuntimeError: return \"<%s unbound>\" %", "RuntimeError: return [] def __getattr__(self, name: Any) -> Any: if", "__oct__ = lambda x: oct(x._get_current_object()) # noqa: E731 __hex__ =", "object.__setattr__(self, \"__LocalProxy_local\", local) object.__setattr__(self, \"__wrapped__\", local) object.__setattr__(self, \"__name__\", name) def", "o: x._get_current_object().__div__(o) # noqa: E731 __truediv__ = lambda x, o:", "o: o + x._get_current_object() # noqa: E731 __rsub__ = lambda", "<= o # noqa: E731 __eq__ = lambda x, o:", "noqa: E731 __rmul__ = lambda x, o: o * x._get_current_object()", "lambda x, n: delattr(x._get_current_object(), n) # type: ignore # noqa:", "name) def __setitem__(self, key: Any, value: Any) -> Any: self._get_current_object()[key]", "E731 __getitem__ = lambda x, i: x._get_current_object()[i] # noqa: E731", "__and__ = lambda x, o: x._get_current_object() & o # noqa:", "name: 
str) -> None: values = self._storage.get({}) try: del values[name]", "noqa: E731 __floordiv__ = lambda x, o: x._get_current_object() // o", "E731 __deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo) # noqa:", "E731 __str__ = lambda x: str(x._get_current_object()) # type: ignore #", "return values[name] except KeyError: raise AttributeError(name) def __setattr__(self, name: str,", "E731 __rmul__ = lambda x, o: o * x._get_current_object() #", "import ContextVar # noqa # contextvars not understood as stdlib", "# Note as __setattr__ is overidden below, use the object", "# noqa: E731 __iter__ = lambda x: iter(x._get_current_object()) # noqa:", "E731 __divmod__ = lambda x, o: x._get_current_object().__divmod__(o) # noqa: E731", "x: ~(x._get_current_object()) # noqa: E731 __complex__ = lambda x: complex(x._get_current_object())", "yield x __setattr__ = lambda x, n, v: setattr( #", "E731 __contains__ = lambda x, i: i in x._get_current_object() #", "try: return self._task_local.stack[-1] except (AttributeError, IndexError): return None class LocalProxy:", "self._task_local = TaskLocal() def push(self, value: Any) -> None: stack", "o: x._get_current_object().__truediv__(o) # noqa: E731 __neg__ = lambda x: -(x._get_current_object())", "(\"_storage\",) def __init__(self) -> None: # Note as __setattr__ is", "# noqa: E731 __index__ = lambda x: x._get_current_object().__index__() # noqa:", "__gt__ = lambda x, o: x._get_current_object() > o # noqa:", "lambda x, o: x._get_current_object().__div__(o) # noqa: E731 __truediv__ = lambda", "!= o # type: ignore # noqa: E731 __gt__ =", "# noqa: E731 __oct__ = lambda x: oct(x._get_current_object()) # noqa:", "# noqa: E731 __le__ = lambda x, o: x._get_current_object() <=", "o: x._get_current_object() == o # type: ignore # noqa: E731", "lambda x, *a, **kw: x._get_current_object()(*a, **kw) # noqa: E731 __len__", "__getattr__(self, name: str) -> Any: values = self._storage.get({}) try: return", "__dict__(self) -> Dict[str, Any]: # type: ignore try: return self._get_current_object().__dict__", "o: x._get_current_object() < o # noqa: E731 __le__ = lambda", "lambda x, o: x._get_current_object() >= o # noqa: E731 __hash__", "x._get_current_object() & o # noqa: E731 __xor__ = lambda x,", "asyncio import copy from contextvars import ContextVar # noqa #", "= lambda x: hash(x._get_current_object()) # type: ignore # noqa: E731", "__index__ = lambda x: x._get_current_object().__index__() # noqa: E731 __coerce__ =", "x, o: x._get_current_object() ** o # noqa: E731 __lshift__ =", "__setitem__(self, key: Any, value: Any) -> Any: self._get_current_object()[key] = value", "E731 __invert__ = lambda x: ~(x._get_current_object()) # noqa: E731 __complex__", "<reponame>Dunkledore/quart<gh_stars>1-10 from __future__ import annotations import asyncio import copy from", "x: x._get_current_object().__index__() # noqa: E731 __coerce__ = lambda x, o:", "copy.copy(x._get_current_object()) # noqa: E731 __deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(),", "= value def __delitem__(self, key: Any) -> Any: del self._get_current_object()[key]", "o # noqa: E731 __div__ = lambda x, o: x._get_current_object().__div__(o)", "local) object.__setattr__(self, \"__wrapped__\", local) object.__setattr__(self, \"__name__\", name) def _get_current_object(self) ->", "__abs__ = lambda x: abs(x._get_current_object()) # noqa: E731 __invert__ =", "E731 __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw) #", "raise AttributeError(\"__dict__\") def 
__repr__(self) -> str: try: obj = self._get_current_object()", "ignore try: return self._get_current_object().__dict__ except RuntimeError: raise AttributeError(\"__dict__\") def __repr__(self)", "lambda x: x._get_current_object().__enter__() # noqa: E731 __exit__ = lambda x,", "# noqa: E731 __floordiv__ = lambda x, o: x._get_current_object() //", "LocalStack: def __init__(self) -> None: self._task_local = TaskLocal() def push(self,", "^ o # noqa: E731 __or__ = lambda x, o:", "lambda x, o: o % x._get_current_object() # noqa: E731 __rdivmod__", "__rdiv__ __rfloordiv__ = lambda x, o: o // x._get_current_object() #", "__rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o) # noqa: E731 __copy__", "copy from contextvars import ContextVar # noqa # contextvars not", "x, o: x._get_current_object() <= o # noqa: E731 __eq__ =", "return stack.pop() @property def top(self) -> Any: try: return self._task_local.stack[-1]", "x, o: o - x._get_current_object() # noqa: E731 __rmul__ =", "__mul__ = lambda x, o: x._get_current_object() * o # noqa:", "o: x._get_current_object() - o # noqa: E731 __mul__ = lambda", "__pos__ = lambda x: +(x._get_current_object()) # noqa: E731 __abs__ =", "noqa: E731 __ge__ = lambda x, o: x._get_current_object() >= o", "__setattr__ is overidden below, use the object __setattr__ object.__setattr__(self, \"_storage\",", "-> bool: try: return bool(self._get_current_object()) except RuntimeError: return False def", "x._get_current_object() >= o # noqa: E731 __hash__ = lambda x:", "return getattr(self._get_current_object(), name) def __setitem__(self, key: Any, value: Any) ->", "__ne__ = lambda x, o: x._get_current_object() != o # type:", "task local object.\"\"\" __slots__ = (\"__dict__\", \"__local\", \"__wrapped__\") def __init__(self,", "= id(task) return task_id else: return 0 class LocalStack: def", "o / x._get_current_object() # noqa: E731 __rtruediv__ = __rdiv__ __rfloordiv__", "o: x._get_current_object() & o # noqa: E731 __xor__ = lambda", "x._get_current_object().__rdivmod__(o) # noqa: E731 __copy__ = lambda x: copy.copy(x._get_current_object()) #", "as __setattr__ is overidden below, use the object __setattr__ object.__setattr__(self,", "x._get_current_object()[i] # noqa: E731 __iter__ = lambda x: iter(x._get_current_object()) #", "noqa: E731 __gt__ = lambda x, o: x._get_current_object() > o", "lambda x: complex(x._get_current_object()) # noqa: E731 __int__ = lambda x:", "Any: async for x in self._get_current_object(): yield x __setattr__ =", "E731 __hash__ = lambda x: hash(x._get_current_object()) # type: ignore #", "current task.\"\"\" __slots__ = (\"_storage\",) def __init__(self) -> None: #", "E731 __float__ = lambda x: float(x._get_current_object()) # noqa: E731 __oct__", "stack = [] stack.append(value) def pop(self) -> Any: stack =", "= lambda x, o: x._get_current_object().__div__(o) # noqa: E731 __truediv__ =", "= lambda x: +(x._get_current_object()) # noqa: E731 __abs__ = lambda", "raise AttributeError(name) def __setattr__(self, name: str, value: Any) -> None:", "noqa: E731 __iter__ = lambda x: iter(x._get_current_object()) # noqa: E731", "= lambda x, i: i in x._get_current_object() # noqa: E731", "noqa: E731 __le__ = lambda x, o: x._get_current_object() <= o", "lambda x, o: x._get_current_object() ** o # noqa: E731 __lshift__", "o: o * x._get_current_object() # noqa: E731 __rdiv__ = lambda", "Any) -> Any: self._get_current_object()[key] = value def __delitem__(self, key: Any)", "o: x._get_current_object().__coerce__(x, o) # noqa: E731 __enter__ = lambda 
x:", "n, v: setattr( # noqa: E731, E501 x._get_current_object(), n, v", "= lambda x, i: x._get_current_object()[i] # noqa: E731 __iter__ =", "noqa: E731 __getitem__ = lambda x, i: x._get_current_object()[i] # noqa:", "def _task_identity() -> int: loop = asyncio.get_event_loop() if loop.is_running(): task", "typing import Any # noqa # contextvars not understood as", "name: Optional[str] = None) -> None: # Note as __setattr__", "noqa: E731 __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o) # noqa:", "hash(x._get_current_object()) # type: ignore # noqa: E731 __call__ = lambda", "Any) -> None: values = self._storage.get({}) values[name] = value self._storage.set(values)", "x._get_current_object().__coerce__(x, o) # noqa: E731 __enter__ = lambda x: x._get_current_object().__enter__()", "noqa: E731 __divmod__ = lambda x, o: x._get_current_object().__divmod__(o) # noqa:", "Optional[str] = None) -> None: # Note as __setattr__ is", "# noqa: E731 __invert__ = lambda x: ~(x._get_current_object()) # noqa:", "ignore # noqa: E731 __gt__ = lambda x, o: x._get_current_object()", "except RuntimeError: return False def __dir__(self) -> Any: try: return", "understood as stdlib from typing import Callable, Dict, Optional class", "# noqa: E731 __hex__ = lambda x: hex(x._get_current_object()) # noqa:", "= lambda x, n, v: setattr( # noqa: E731, E501", "# noqa: E731 __rtruediv__ = __rdiv__ __rfloordiv__ = lambda x,", "# noqa: E731 __enter__ = lambda x: x._get_current_object().__enter__() # noqa:", "__deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo) # noqa: E731", "\"__LocalProxy_local\")() @property def __dict__(self) -> Dict[str, Any]: # type: ignore", "E731 __or__ = lambda x, o: x._get_current_object() | o #", "x, i: x._get_current_object()[i] # noqa: E731 __iter__ = lambda x:", "int: loop = asyncio.get_event_loop() if loop.is_running(): task = asyncio.current_task() task_id", "E731 __mul__ = lambda x, o: x._get_current_object() * o #", "= [] stack.append(value) def pop(self) -> Any: stack = getattr(self._task_local,", "# type: ignore # noqa: E731 __ne__ = lambda x,", "**kw: x._get_current_object()(*a, **kw) # noqa: E731 __len__ = lambda x:", "o # noqa: E731 __and__ = lambda x, o: x._get_current_object()", "# noqa: E731 __lt__ = lambda x, o: x._get_current_object() <", "noqa: E731 __ne__ = lambda x, o: x._get_current_object() != o", "o # noqa: E731 __mod__ = lambda x, o: x._get_current_object()", "-(x._get_current_object()) # noqa: E731 __pos__ = lambda x: +(x._get_current_object()) #", "lambda x: hex(x._get_current_object()) # noqa: E731 __index__ = lambda x:", "x._get_current_object().__truediv__(o) # noqa: E731 __neg__ = lambda x: -(x._get_current_object()) #", "None: values = self._storage.get({}) try: del values[name] self._storage.set(values) except KeyError:", "not understood as stdlib from typing import Any # noqa", "o: o % x._get_current_object() # noqa: E731 __rdivmod__ = lambda", "@property def __dict__(self) -> Dict[str, Any]: # type: ignore try:", "noqa: E731 __index__ = lambda x: x._get_current_object().__index__() # noqa: E731", "o: x._get_current_object() > o # noqa: E731 __ge__ = lambda", "KeyError: raise AttributeError(name) @staticmethod def _task_identity() -> int: loop =", "= lambda x: hex(x._get_current_object()) # noqa: E731 __index__ = lambda", "# noqa: E731 __radd__ = lambda x, o: o +", "None else: return stack.pop() @property def top(self) -> Any: try:", "def __delattr__(self, name: str) -> None: values = self._storage.get({}) try:", "o: o / 
x._get_current_object() # noqa: E731 __rtruediv__ = __rdiv__", "use the object __setattr__ object.__setattr__(self, \"__LocalProxy_local\", local) object.__setattr__(self, \"__wrapped__\", local)", "lambda x, o: x._get_current_object() == o # type: ignore #", "E731 __gt__ = lambda x, o: x._get_current_object() > o #", "-> Any: stack = getattr(self._task_local, \"stack\", None) if stack is", "**kw) # noqa: E731 __len__ = lambda x: len(x._get_current_object()) #", "noqa: E731 __add__ = lambda x, o: x._get_current_object() + o", "__rsub__ = lambda x, o: o - x._get_current_object() # noqa:", "o: o // x._get_current_object() # noqa: E731 __rmod__ = lambda", "stack.pop() @property def top(self) -> Any: try: return self._task_local.stack[-1] except", "__copy__ = lambda x: copy.copy(x._get_current_object()) # noqa: E731 __deepcopy__ =", "__init__(self) -> None: # Note as __setattr__ is overidden below,", "from typing import Callable, Dict, Optional class TaskLocal: \"\"\"An object", "-> Any: try: return dir(self._get_current_object()) except RuntimeError: return [] def", "x, o: o / x._get_current_object() # noqa: E731 __rtruediv__ =", "o: x._get_current_object() % o # noqa: E731 __divmod__ = lambda", "in x._get_current_object() # noqa: E731 __add__ = lambda x, o:", "is overidden below, use the object __setattr__ object.__setattr__(self, \"_storage\", ContextVar(\"storage\"))", "Any: return object.__getattribute__(self, \"__LocalProxy_local\")() @property def __dict__(self) -> Dict[str, Any]:", "= lambda x: int(x._get_current_object()) # noqa: E731 __float__ = lambda", "x, o: x._get_current_object() << o # noqa: E731 __rshift__ =", "noqa: E731 __div__ = lambda x, o: x._get_current_object().__div__(o) # noqa:", "**kw) # noqa: E731 __radd__ = lambda x, o: o", "+ o # noqa: E731 __sub__ = lambda x, o:", "= lambda x: iter(x._get_current_object()) # noqa: E731 __contains__ = lambda", "push(self, value: Any) -> None: stack = getattr(self._task_local, \"stack\", None)", "ignore # noqa: E731 __ne__ = lambda x, o: x._get_current_object()", "\"__wrapped__\", local) object.__setattr__(self, \"__name__\", name) def _get_current_object(self) -> Any: return", "__init__(self, local: Callable, name: Optional[str] = None) -> None: #", "o: x._get_current_object().__rdivmod__(o) # noqa: E731 __copy__ = lambda x: copy.copy(x._get_current_object())", "TaskLocal: \"\"\"An object local to the current task.\"\"\" __slots__ =", "noqa: E731 __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o) #", "self._task_local.stack = stack = [] stack.append(value) def pop(self) -> Any:", "o: x._get_current_object() + o # noqa: E731 __sub__ = lambda", "= lambda x, o: x._get_current_object().__coerce__(x, o) # noqa: E731 __enter__", "lambda x, n, v: setattr( # noqa: E731, E501 x._get_current_object(),", "asyncio.current_task() task_id = id(task) return task_id else: return 0 class", "noqa: E731 __mod__ = lambda x, o: x._get_current_object() % o", "for x in self._get_current_object(): yield x __setattr__ = lambda x,", "noqa: E731 __truediv__ = lambda x, o: x._get_current_object().__truediv__(o) # noqa:", "x._get_current_object().__div__(o) # noqa: E731 __truediv__ = lambda x, o: x._get_current_object().__truediv__(o)", "overidden below, use the object __setattr__ object.__setattr__(self, \"_storage\", ContextVar(\"storage\")) def", "values = self._storage.get({}) values[name] = value self._storage.set(values) def __delattr__(self, name:", "type: ignore # noqa: E731 __str__ = lambda x: str(x._get_current_object())", "Any) -> 
Any: if name == \"__members__\": return dir(self._get_current_object()) return", "class LocalProxy: \"\"\"Proxy to a task local object.\"\"\" __slots__ =", "key: Any) -> Any: del self._get_current_object()[key] async def __aiter__(self) ->", "return bool(self._get_current_object()) except RuntimeError: return False def __dir__(self) -> Any:", "or stack == []: return None else: return stack.pop() @property", "-> Any: if name == \"__members__\": return dir(self._get_current_object()) return getattr(self._get_current_object(),", "o // x._get_current_object() # noqa: E731 __rmod__ = lambda x,", "str: try: obj = self._get_current_object() except RuntimeError: return \"<%s unbound>\"", "E731 __sub__ = lambda x, o: x._get_current_object() - o #", "x: x._get_current_object().__enter__() # noqa: E731 __exit__ = lambda x, *a,", "# noqa: E731 __mul__ = lambda x, o: x._get_current_object() *", "= lambda x, o: o % x._get_current_object() # noqa: E731", "x, o: x._get_current_object() >= o # noqa: E731 __hash__ =", "x, o: o + x._get_current_object() # noqa: E731 __rsub__ =", "lambda x: iter(x._get_current_object()) # noqa: E731 __contains__ = lambda x,", "KeyError: raise AttributeError(name) def __setattr__(self, name: str, value: Any) ->", "+(x._get_current_object()) # noqa: E731 __abs__ = lambda x: abs(x._get_current_object()) #", "x, o: x._get_current_object() > o # noqa: E731 __ge__ =", "return \"<%s unbound>\" % self.__class__.__name__ return repr(obj) def __bool__(self) ->", ">= o # noqa: E731 __hash__ = lambda x: hash(x._get_current_object())", "// o # noqa: E731 __mod__ = lambda x, o:", "lambda x: abs(x._get_current_object()) # noqa: E731 __invert__ = lambda x:", "values = self._storage.get({}) try: return values[name] except KeyError: raise AttributeError(name)", "\"stack\", None) if stack is None or stack == []:", "= lambda x, o: x._get_current_object() * o # noqa: E731", "None) -> None: # Note as __setattr__ is overidden below,", "E731 __div__ = lambda x, o: x._get_current_object().__div__(o) # noqa: E731", "lambda x, memo: copy.deepcopy(x._get_current_object(), memo) # noqa: E731 __await__ =", "__floordiv__ = lambda x, o: x._get_current_object() // o # noqa:", "lambda x, o: x._get_current_object().__coerce__(x, o) # noqa: E731 __enter__ =", "(AttributeError, IndexError): return None class LocalProxy: \"\"\"Proxy to a task", "E731 __eq__ = lambda x, o: x._get_current_object() == o #", "value: Any) -> None: values = self._storage.get({}) values[name] = value", "x, o: x._get_current_object() != o # type: ignore # noqa:", "= lambda x, o: x._get_current_object() % o # noqa: E731", "stack.append(value) def pop(self) -> Any: stack = getattr(self._task_local, \"stack\", None)", "x._get_current_object().__index__() # noqa: E731 __coerce__ = lambda x, o: x._get_current_object().__coerce__(x,", "x: abs(x._get_current_object()) # noqa: E731 __invert__ = lambda x: ~(x._get_current_object())", "ContextVar # noqa # contextvars not understood as stdlib from", ") __delattr__ = lambda x, n: delattr(x._get_current_object(), n) # type:", "o: x._get_current_object() != o # type: ignore # noqa: E731", "noqa: E731 __rtruediv__ = __rdiv__ __rfloordiv__ = lambda x, o:", "lambda x, i: x._get_current_object()[i] # noqa: E731 __iter__ = lambda", "x._get_current_object() # noqa: E731 __rsub__ = lambda x, o: o", "Any]: # type: ignore try: return self._get_current_object().__dict__ except RuntimeError: raise", "-> None: # Note as __setattr__ is overidden below, use", "__hash__ = lambda x: hash(x._get_current_object()) # 
type: ignore # noqa:", "str, value: Any) -> None: values = self._storage.get({}) values[name] =", "x._get_current_object() | o # noqa: E731 __div__ = lambda x,", "class TaskLocal: \"\"\"An object local to the current task.\"\"\" __slots__", "__mod__ = lambda x, o: x._get_current_object() % o # noqa:", "lambda x, o: x._get_current_object() * o # noqa: E731 __floordiv__", "below, use the object __setattr__ object.__setattr__(self, \"_storage\", ContextVar(\"storage\")) def __getattr__(self,", "repr(obj) def __bool__(self) -> bool: try: return bool(self._get_current_object()) except RuntimeError:", "self.__class__.__name__ return repr(obj) def __bool__(self) -> bool: try: return bool(self._get_current_object())", "__div__ = lambda x, o: x._get_current_object().__div__(o) # noqa: E731 __truediv__", "values[name] = value self._storage.set(values) def __delattr__(self, name: str) -> None:", "o: x._get_current_object() << o # noqa: E731 __rshift__ = lambda", "o + x._get_current_object() # noqa: E731 __rsub__ = lambda x,", "object __setattr__ object.__setattr__(self, \"_storage\", ContextVar(\"storage\")) def __getattr__(self, name: str) ->", "-> Any: try: return self._task_local.stack[-1] except (AttributeError, IndexError): return None", "E731 __hex__ = lambda x: hex(x._get_current_object()) # noqa: E731 __index__", "__exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw) # noqa:", "object __setattr__ object.__setattr__(self, \"__LocalProxy_local\", local) object.__setattr__(self, \"__wrapped__\", local) object.__setattr__(self, \"__name__\",", "lambda x, o: o + x._get_current_object() # noqa: E731 __rsub__", "o # noqa: E731 __divmod__ = lambda x, o: x._get_current_object().__divmod__(o)", "\"__wrapped__\") def __init__(self, local: Callable, name: Optional[str] = None) ->", "# noqa: E731 __str__ = lambda x: str(x._get_current_object()) # type:", "self._task_local.stack[-1] except (AttributeError, IndexError): return None class LocalProxy: \"\"\"Proxy to", "__len__ = lambda x: len(x._get_current_object()) # noqa: E731 __getitem__ =", "type: ignore ) __delattr__ = lambda x, n: delattr(x._get_current_object(), n)", "E731 __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o) # noqa: E731", "return [] def __getattr__(self, name: Any) -> Any: if name", "# noqa: E731 __complex__ = lambda x: complex(x._get_current_object()) # noqa:", "oct(x._get_current_object()) # noqa: E731 __hex__ = lambda x: hex(x._get_current_object()) #", "memo) # noqa: E731 __await__ = lambda x: x._get_current_object().__await__() #", "def __bool__(self) -> bool: try: return bool(self._get_current_object()) except RuntimeError: return", "Note as __setattr__ is overidden below, use the object __setattr__", "the object __setattr__ object.__setattr__(self, \"__LocalProxy_local\", local) object.__setattr__(self, \"__wrapped__\", local) object.__setattr__(self,", "# noqa: E731 __hash__ = lambda x: hash(x._get_current_object()) # type:", "if loop.is_running(): task = asyncio.current_task() task_id = id(task) return task_id", "# noqa: E731 __or__ = lambda x, o: x._get_current_object() |", "del self._get_current_object()[key] async def __aiter__(self) -> Any: async for x", "__float__ = lambda x: float(x._get_current_object()) # noqa: E731 __oct__ =", "o # noqa: E731 __floordiv__ = lambda x, o: x._get_current_object()", "= TaskLocal() def push(self, value: Any) -> None: stack =", "# type: ignore try: return self._get_current_object().__dict__ except RuntimeError: raise AttributeError(\"__dict__\")", 
"x._get_current_object().__divmod__(o) # noqa: E731 __pow__ = lambda x, o: x._get_current_object()", "# type: ignore # noqa: E731 __gt__ = lambda x,", "# noqa: E731 __xor__ = lambda x, o: x._get_current_object() ^", "= lambda x: -(x._get_current_object()) # noqa: E731 __pos__ = lambda", "object.__getattribute__(self, \"__LocalProxy_local\")() @property def __dict__(self) -> Dict[str, Any]: # type:", "del values[name] self._storage.set(values) except KeyError: raise AttributeError(name) @staticmethod def _task_identity()", "Any: try: return self._task_local.stack[-1] except (AttributeError, IndexError): return None class", "def __dict__(self) -> Dict[str, Any]: # type: ignore try: return", "return dir(self._get_current_object()) except RuntimeError: return [] def __getattr__(self, name: Any)", "noqa: E731 __or__ = lambda x, o: x._get_current_object() | o", "# noqa: E731 __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o) #", "x: copy.copy(x._get_current_object()) # noqa: E731 __deepcopy__ = lambda x, memo:", "= lambda x, o: x._get_current_object() <= o # noqa: E731", "x, o: x._get_current_object() + o # noqa: E731 __sub__ =", "as stdlib from typing import Any # noqa # contextvars", "# noqa: E731 __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a,", "x: iter(x._get_current_object()) # noqa: E731 __contains__ = lambda x, i:", "except RuntimeError: return [] def __getattr__(self, name: Any) -> Any:", "= lambda x, o: x._get_current_object() << o # noqa: E731", "= lambda x, o: o + x._get_current_object() # noqa: E731", "not understood as stdlib from typing import Callable, Dict, Optional", "x, o: x._get_current_object() == o # type: ignore # noqa:", "= None) -> None: # Note as __setattr__ is overidden", "noqa: E731 __hex__ = lambda x: hex(x._get_current_object()) # noqa: E731", "RuntimeError: return \"<%s unbound>\" % self.__class__.__name__ return repr(obj) def __bool__(self)", "def push(self, value: Any) -> None: stack = getattr(self._task_local, \"stack\",", "__dir__(self) -> Any: try: return dir(self._get_current_object()) except RuntimeError: return []", "False def __dir__(self) -> Any: try: return dir(self._get_current_object()) except RuntimeError:", "n: delattr(x._get_current_object(), n) # type: ignore # noqa: E731 __str__", "values[name] except KeyError: raise AttributeError(name) def __setattr__(self, name: str, value:", "__setattr__(self, name: str, value: Any) -> None: values = self._storage.get({})", "x._get_current_object().__exit__(*a, **kw) # noqa: E731 __radd__ = lambda x, o:", "# noqa # contextvars not understood as stdlib from typing", "__delattr__(self, name: str) -> None: values = self._storage.get({}) try: del", "lambda x, o: x._get_current_object() != o # type: ignore #", "to the current task.\"\"\" __slots__ = (\"_storage\",) def __init__(self) ->", "== \"__members__\": return dir(self._get_current_object()) return getattr(self._get_current_object(), name) def __setitem__(self, key:", "task.\"\"\" __slots__ = (\"_storage\",) def __init__(self) -> None: # Note", "noqa: E731 __lshift__ = lambda x, o: x._get_current_object() << o", "E731 __xor__ = lambda x, o: x._get_current_object() ^ o #", "E731 __abs__ = lambda x: abs(x._get_current_object()) # noqa: E731 __invert__", "\"stack\", None) if stack is None: self._task_local.stack = stack =", "-> Any: values = self._storage.get({}) try: return values[name] except KeyError:", "o % x._get_current_object() # noqa: E731 __rdivmod__ = lambda x,", "\"\"\"An object local to the current task.\"\"\" 
__slots__ = (\"_storage\",)", "from typing import Any # noqa # contextvars not understood", "= lambda x, o: x._get_current_object() - o # noqa: E731", "LocalProxy: \"\"\"Proxy to a task local object.\"\"\" __slots__ = (\"__dict__\",", "lambda x, o: o * x._get_current_object() # noqa: E731 __rdiv__", "= lambda x: len(x._get_current_object()) # noqa: E731 __getitem__ = lambda", "-> None: values = self._storage.get({}) try: del values[name] self._storage.set(values) except", "== []: return None else: return stack.pop() @property def top(self)", "= lambda x, *a, **kw: x._get_current_object()(*a, **kw) # noqa: E731", "= lambda x: copy.copy(x._get_current_object()) # noqa: E731 __deepcopy__ = lambda", "stack = getattr(self._task_local, \"stack\", None) if stack is None: self._task_local.stack", "x, o: x._get_current_object() & o # noqa: E731 __xor__ =", "# noqa: E731 __neg__ = lambda x: -(x._get_current_object()) # noqa:", "value: Any) -> Any: self._get_current_object()[key] = value def __delitem__(self, key:", "E731, E501 x._get_current_object(), n, v # type: ignore ) __delattr__", "raise AttributeError(name) @staticmethod def _task_identity() -> int: loop = asyncio.get_event_loop()", "iter(x._get_current_object()) # noqa: E731 __contains__ = lambda x, i: i", "from contextvars import ContextVar # noqa # contextvars not understood", "n) # type: ignore # noqa: E731 __str__ = lambda", "bool: try: return bool(self._get_current_object()) except RuntimeError: return False def __dir__(self)", "\"\"\"Proxy to a task local object.\"\"\" __slots__ = (\"__dict__\", \"__local\",", "object.\"\"\" __slots__ = (\"__dict__\", \"__local\", \"__wrapped__\") def __init__(self, local: Callable,", "str) -> None: values = self._storage.get({}) try: del values[name] self._storage.set(values)", "lambda x, o: x._get_current_object().__divmod__(o) # noqa: E731 __pow__ = lambda", "noqa: E731 __float__ = lambda x: float(x._get_current_object()) # noqa: E731", "@property def top(self) -> Any: try: return self._task_local.stack[-1] except (AttributeError,", "__setattr__ object.__setattr__(self, \"__LocalProxy_local\", local) object.__setattr__(self, \"__wrapped__\", local) object.__setattr__(self, \"__name__\", name)", "x._get_current_object() # noqa: E731 __add__ = lambda x, o: x._get_current_object()", "__int__ = lambda x: int(x._get_current_object()) # noqa: E731 __float__ =", "lambda x: oct(x._get_current_object()) # noqa: E731 __hex__ = lambda x:", "lambda x: int(x._get_current_object()) # noqa: E731 __float__ = lambda x:", "else: return 0 class LocalStack: def __init__(self) -> None: self._task_local", "# noqa: E731 __rsub__ = lambda x, o: o -", "o * x._get_current_object() # noqa: E731 __rdiv__ = lambda x,", "noqa: E731 __rmod__ = lambda x, o: o % x._get_current_object()", "noqa: E731 __sub__ = lambda x, o: x._get_current_object() - o", "x, o: o % x._get_current_object() # noqa: E731 __rdivmod__ =", "- x._get_current_object() # noqa: E731 __rmul__ = lambda x, o:", "name: str, value: Any) -> None: values = self._storage.get({}) values[name]", "# noqa: E731 __mod__ = lambda x, o: x._get_current_object() %", "% x._get_current_object() # noqa: E731 __rdivmod__ = lambda x, o:", "\"__LocalProxy_local\", local) object.__setattr__(self, \"__wrapped__\", local) object.__setattr__(self, \"__name__\", name) def _get_current_object(self)", "memo: copy.deepcopy(x._get_current_object(), memo) # noqa: E731 __await__ = lambda x:", "noqa: E731 __pos__ = lambda x: +(x._get_current_object()) # noqa: E731", "self._storage.set(values) 
def __delattr__(self, name: str) -> None: values = self._storage.get({})", "\"__name__\", name) def _get_current_object(self) -> Any: return object.__getattribute__(self, \"__LocalProxy_local\")() @property", "noqa: E731 __xor__ = lambda x, o: x._get_current_object() ^ o", "import annotations import asyncio import copy from contextvars import ContextVar", "None: stack = getattr(self._task_local, \"stack\", None) if stack is None:", "return dir(self._get_current_object()) return getattr(self._get_current_object(), name) def __setitem__(self, key: Any, value:", "i: i in x._get_current_object() # noqa: E731 __add__ = lambda", "o: o - x._get_current_object() # noqa: E731 __rmul__ = lambda", "= (\"_storage\",) def __init__(self) -> None: # Note as __setattr__", "understood as stdlib from typing import Any # noqa #", "noqa: E731 __radd__ = lambda x, o: o + x._get_current_object()", "lambda x: -(x._get_current_object()) # noqa: E731 __pos__ = lambda x:", "= lambda x, o: x._get_current_object() >> o # noqa: E731", "x: len(x._get_current_object()) # noqa: E731 __getitem__ = lambda x, i:", "def top(self) -> Any: try: return self._task_local.stack[-1] except (AttributeError, IndexError):", "x._get_current_object() << o # noqa: E731 __rshift__ = lambda x,", "self._get_current_object(): yield x __setattr__ = lambda x, n, v: setattr(", "E731 __floordiv__ = lambda x, o: x._get_current_object() // o #", "/ x._get_current_object() # noqa: E731 __rtruediv__ = __rdiv__ __rfloordiv__ =", "ignore ) __delattr__ = lambda x, n: delattr(x._get_current_object(), n) #", "__contains__ = lambda x, i: i in x._get_current_object() # noqa:", "self._storage.get({}) try: del values[name] self._storage.set(values) except KeyError: raise AttributeError(name) @staticmethod", "= lambda x: oct(x._get_current_object()) # noqa: E731 __hex__ = lambda", "noqa: E731 __lt__ = lambda x, o: x._get_current_object() < o", "(\"__dict__\", \"__local\", \"__wrapped__\") def __init__(self, local: Callable, name: Optional[str] =", "import Callable, Dict, Optional class TaskLocal: \"\"\"An object local to", "object.__setattr__(self, \"__name__\", name) def _get_current_object(self) -> Any: return object.__getattribute__(self, \"__LocalProxy_local\")()", "x in self._get_current_object(): yield x __setattr__ = lambda x, n,", "the current task.\"\"\" __slots__ = (\"_storage\",) def __init__(self) -> None:", "x._get_current_object() ** o # noqa: E731 __lshift__ = lambda x,", "value: Any) -> None: stack = getattr(self._task_local, \"stack\", None) if", "x, o: x._get_current_object() % o # noqa: E731 __divmod__ =", "__delattr__ = lambda x, n: delattr(x._get_current_object(), n) # type: ignore", "= lambda x, o: o * x._get_current_object() # noqa: E731", "*a, **kw: x._get_current_object().__exit__(*a, **kw) # noqa: E731 __radd__ = lambda", "<< o # noqa: E731 __rshift__ = lambda x, o:", "% self.__class__.__name__ return repr(obj) def __bool__(self) -> bool: try: return", "# noqa: E731, E501 x._get_current_object(), n, v # type: ignore", "None class LocalProxy: \"\"\"Proxy to a task local object.\"\"\" __slots__", "return self._task_local.stack[-1] except (AttributeError, IndexError): return None class LocalProxy: \"\"\"Proxy", "= lambda x: ~(x._get_current_object()) # noqa: E731 __complex__ = lambda", "E731 __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o) # noqa:", "x, i: i in x._get_current_object() # noqa: E731 __add__ =", "copy.deepcopy(x._get_current_object(), memo) # noqa: E731 __await__ = lambda x: 
x._get_current_object().__await__()", "if stack is None or stack == []: return None", "lambda x, o: o // x._get_current_object() # noqa: E731 __rmod__", "x: float(x._get_current_object()) # noqa: E731 __oct__ = lambda x: oct(x._get_current_object())", "x, o: x._get_current_object() - o # noqa: E731 __mul__ =", "__complex__ = lambda x: complex(x._get_current_object()) # noqa: E731 __int__ =", "lambda x, o: x._get_current_object() % o # noqa: E731 __divmod__", "None) if stack is None: self._task_local.stack = stack = []", "except RuntimeError: raise AttributeError(\"__dict__\") def __repr__(self) -> str: try: obj", "# contextvars not understood as stdlib from typing import Callable,", "noqa # contextvars not understood as stdlib from typing import", "o - x._get_current_object() # noqa: E731 __rmul__ = lambda x,", "x._get_current_object() * o # noqa: E731 __floordiv__ = lambda x,", "# noqa: E731 __contains__ = lambda x, i: i in", "def __init__(self) -> None: self._task_local = TaskLocal() def push(self, value:", "from __future__ import annotations import asyncio import copy from contextvars", "name) def _get_current_object(self) -> Any: return object.__getattribute__(self, \"__LocalProxy_local\")() @property def", "def __init__(self, local: Callable, name: Optional[str] = None) -> None:", "= lambda x, o: x._get_current_object() < o # noqa: E731", "# noqa: E731 __and__ = lambda x, o: x._get_current_object() &", "Any # noqa # contextvars not understood as stdlib from", "annotations import asyncio import copy from contextvars import ContextVar #", "__bool__(self) -> bool: try: return bool(self._get_current_object()) except RuntimeError: return False", "return object.__getattribute__(self, \"__LocalProxy_local\")() @property def __dict__(self) -> Dict[str, Any]: #", "contextvars import ContextVar # noqa # contextvars not understood as", "AttributeError(name) @staticmethod def _task_identity() -> int: loop = asyncio.get_event_loop() if", "lambda x: x._get_current_object().__index__() # noqa: E731 __coerce__ = lambda x,", "noqa: E731 __len__ = lambda x: len(x._get_current_object()) # noqa: E731", "noqa: E731 __complex__ = lambda x: complex(x._get_current_object()) # noqa: E731", "E501 x._get_current_object(), n, v # type: ignore ) __delattr__ =", "o # noqa: E731 __eq__ = lambda x, o: x._get_current_object()", "hex(x._get_current_object()) # noqa: E731 __index__ = lambda x: x._get_current_object().__index__() #", "lambda x, o: x._get_current_object() & o # noqa: E731 __xor__", "try: return bool(self._get_current_object()) except RuntimeError: return False def __dir__(self) ->", "import asyncio import copy from contextvars import ContextVar # noqa", "return False def __dir__(self) -> Any: try: return dir(self._get_current_object()) except", "noqa: E731 __int__ = lambda x: int(x._get_current_object()) # noqa: E731", "name: str) -> Any: values = self._storage.get({}) try: return values[name]", "x: hex(x._get_current_object()) # noqa: E731 __index__ = lambda x: x._get_current_object().__index__()", "ignore # noqa: E731 __lt__ = lambda x, o: x._get_current_object()", "- o # noqa: E731 __mul__ = lambda x, o:", "= lambda x, o: x._get_current_object() >= o # noqa: E731", "self._get_current_object().__dict__ except RuntimeError: raise AttributeError(\"__dict__\") def __repr__(self) -> str: try:", "@staticmethod def _task_identity() -> int: loop = asyncio.get_event_loop() if loop.is_running():", "class LocalStack: def __init__(self) -> None: self._task_local = TaskLocal() def", "__slots__ = (\"_storage\",) 
def __init__(self) -> None: # Note as", "typing import Callable, Dict, Optional class TaskLocal: \"\"\"An object local", "x._get_current_object() % o # noqa: E731 __divmod__ = lambda x,", "= lambda x, o: x._get_current_object() | o # noqa: E731", "lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw) # noqa: E731 __radd__", "E731 __radd__ = lambda x, o: o + x._get_current_object() #", "self._storage.get({}) try: return values[name] except KeyError: raise AttributeError(name) def __setattr__(self,", "# noqa: E731 __len__ = lambda x: len(x._get_current_object()) # noqa:", "loop.is_running(): task = asyncio.current_task() task_id = id(task) return task_id else:", "x, o: x._get_current_object() * o # noqa: E731 __floordiv__ =", "noqa: E731 __rshift__ = lambda x, o: x._get_current_object() >> o", "__divmod__ = lambda x, o: x._get_current_object().__divmod__(o) # noqa: E731 __pow__", "return repr(obj) def __bool__(self) -> bool: try: return bool(self._get_current_object()) except", "x: oct(x._get_current_object()) # noqa: E731 __hex__ = lambda x: hex(x._get_current_object())", "Any: del self._get_current_object()[key] async def __aiter__(self) -> Any: async for", "__truediv__ = lambda x, o: x._get_current_object().__truediv__(o) # noqa: E731 __neg__", "bool(self._get_current_object()) except RuntimeError: return False def __dir__(self) -> Any: try:", "x: str(x._get_current_object()) # type: ignore # noqa: E731 __lt__ =", "__rmul__ = lambda x, o: o * x._get_current_object() # noqa:", "def __init__(self) -> None: # Note as __setattr__ is overidden", "x, n: delattr(x._get_current_object(), n) # type: ignore # noqa: E731", "== o # type: ignore # noqa: E731 __ne__ =", "x._get_current_object() + o # noqa: E731 __sub__ = lambda x,", "float(x._get_current_object()) # noqa: E731 __oct__ = lambda x: oct(x._get_current_object()) #", "type: ignore # noqa: E731 __call__ = lambda x, *a,", "-> None: self._task_local = TaskLocal() def push(self, value: Any) ->", "E731 __pos__ = lambda x: +(x._get_current_object()) # noqa: E731 __abs__", "= stack = [] stack.append(value) def pop(self) -> Any: stack", "Callable, Dict, Optional class TaskLocal: \"\"\"An object local to the", "< o # noqa: E731 __le__ = lambda x, o:", ">> o # noqa: E731 __and__ = lambda x, o:", "x, o: x._get_current_object() < o # noqa: E731 __le__ =", "__enter__ = lambda x: x._get_current_object().__enter__() # noqa: E731 __exit__ =", "E731 __complex__ = lambda x: complex(x._get_current_object()) # noqa: E731 __int__", "= lambda x, o: o - x._get_current_object() # noqa: E731", "import Any # noqa # contextvars not understood as stdlib", "lambda x, o: x._get_current_object() << o # noqa: E731 __rshift__", "-> Any: async for x in self._get_current_object(): yield x __setattr__", "o # noqa: E731 __xor__ = lambda x, o: x._get_current_object()", "= lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw) # noqa: E731", "o # noqa: E731 __hash__ = lambda x: hash(x._get_current_object()) #", "contextvars not understood as stdlib from typing import Callable, Dict,", "return None class LocalProxy: \"\"\"Proxy to a task local object.\"\"\"", "x._get_current_object() # noqa: E731 __rdiv__ = lambda x, o: o", "noqa: E731 __rdiv__ = lambda x, o: o / x._get_current_object()", "o # noqa: E731 __rshift__ = lambda x, o: x._get_current_object()", "noqa: E731 __copy__ = lambda x: copy.copy(x._get_current_object()) # noqa: E731", "RuntimeError: return False def __dir__(self) -> Any: try: return dir(self._get_current_object())", "async for x in 
self._get_current_object(): yield x __setattr__ = lambda", "task_id = id(task) return task_id else: return 0 class LocalStack:", "to a task local object.\"\"\" __slots__ = (\"__dict__\", \"__local\", \"__wrapped__\")", "def __setitem__(self, key: Any, value: Any) -> Any: self._get_current_object()[key] =", "x, *a, **kw: x._get_current_object()(*a, **kw) # noqa: E731 __len__ =", "the object __setattr__ object.__setattr__(self, \"_storage\", ContextVar(\"storage\")) def __getattr__(self, name: str)", "= lambda x, o: x._get_current_object() ** o # noqa: E731", "x._get_current_object().__enter__() # noqa: E731 __exit__ = lambda x, *a, **kw:", "noqa: E731 __hash__ = lambda x: hash(x._get_current_object()) # type: ignore", "Optional class TaskLocal: \"\"\"An object local to the current task.\"\"\"", "import copy from contextvars import ContextVar # noqa # contextvars", "= self._get_current_object() except RuntimeError: return \"<%s unbound>\" % self.__class__.__name__ return", "E731 __truediv__ = lambda x, o: x._get_current_object().__truediv__(o) # noqa: E731", "= lambda x, o: x._get_current_object() ^ o # noqa: E731", "-> int: loop = asyncio.get_event_loop() if loop.is_running(): task = asyncio.current_task()", "value self._storage.set(values) def __delattr__(self, name: str) -> None: values =", "type: ignore try: return self._get_current_object().__dict__ except RuntimeError: raise AttributeError(\"__dict__\") def", "__ge__ = lambda x, o: x._get_current_object() >= o # noqa:", "= getattr(self._task_local, \"stack\", None) if stack is None or stack", "x._get_current_object() // o # noqa: E731 __mod__ = lambda x,", "// x._get_current_object() # noqa: E731 __rmod__ = lambda x, o:", "x._get_current_object() - o # noqa: E731 __mul__ = lambda x,", "__lshift__ = lambda x, o: x._get_current_object() << o # noqa:", "getattr(self._task_local, \"stack\", None) if stack is None or stack ==", "E731 __copy__ = lambda x: copy.copy(x._get_current_object()) # noqa: E731 __deepcopy__", "IndexError): return None class LocalProxy: \"\"\"Proxy to a task local", "__hex__ = lambda x: hex(x._get_current_object()) # noqa: E731 __index__ =", "o # type: ignore # noqa: E731 __ne__ = lambda", "* o # noqa: E731 __floordiv__ = lambda x, o:", "lambda x: hash(x._get_current_object()) # type: ignore # noqa: E731 __call__", "o) # noqa: E731 __enter__ = lambda x: x._get_current_object().__enter__() #", "__le__ = lambda x, o: x._get_current_object() <= o # noqa:", "= self._storage.get({}) values[name] = value self._storage.set(values) def __delattr__(self, name: str)", "def pop(self) -> Any: stack = getattr(self._task_local, \"stack\", None) if", "_get_current_object(self) -> Any: return object.__getattribute__(self, \"__LocalProxy_local\")() @property def __dict__(self) ->", "# contextvars not understood as stdlib from typing import Any", "# type: ignore # noqa: E731 __lt__ = lambda x,", "__rshift__ = lambda x, o: x._get_current_object() >> o # noqa:", "x, o: x._get_current_object() ^ o # noqa: E731 __or__ =", "def __dir__(self) -> Any: try: return dir(self._get_current_object()) except RuntimeError: return", "except (AttributeError, IndexError): return None class LocalProxy: \"\"\"Proxy to a", "E731 __oct__ = lambda x: oct(x._get_current_object()) # noqa: E731 __hex__", "= lambda x, o: x._get_current_object() & o # noqa: E731", "E731 __index__ = lambda x: x._get_current_object().__index__() # noqa: E731 __coerce__", "x._get_current_object() # noqa: E731 __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)", "local 
object.\"\"\" __slots__ = (\"__dict__\", \"__local\", \"__wrapped__\") def __init__(self, local:", "E731 __len__ = lambda x: len(x._get_current_object()) # noqa: E731 __getitem__", "__sub__ = lambda x, o: x._get_current_object() - o # noqa:", "else: return stack.pop() @property def top(self) -> Any: try: return", "\"<%s unbound>\" % self.__class__.__name__ return repr(obj) def __bool__(self) -> bool:", "as stdlib from typing import Callable, Dict, Optional class TaskLocal:", "> o # noqa: E731 __ge__ = lambda x, o:", "lambda x, o: x._get_current_object() + o # noqa: E731 __sub__", "Dict[str, Any]: # type: ignore try: return self._get_current_object().__dict__ except RuntimeError:", "= asyncio.get_event_loop() if loop.is_running(): task = asyncio.current_task() task_id = id(task)", "local to the current task.\"\"\" __slots__ = (\"_storage\",) def __init__(self)", "-> Any: return object.__getattribute__(self, \"__LocalProxy_local\")() @property def __dict__(self) -> Dict[str,", "noqa: E731, E501 x._get_current_object(), n, v # type: ignore )", "def __aiter__(self) -> Any: async for x in self._get_current_object(): yield", "noqa: E731 __mul__ = lambda x, o: x._get_current_object() * o", "0 class LocalStack: def __init__(self) -> None: self._task_local = TaskLocal()", "abs(x._get_current_object()) # noqa: E731 __invert__ = lambda x: ~(x._get_current_object()) #", "id(task) return task_id else: return 0 class LocalStack: def __init__(self)", "# noqa: E731 __truediv__ = lambda x, o: x._get_current_object().__truediv__(o) #", "# noqa: E731 __sub__ = lambda x, o: x._get_current_object() -", "in self._get_current_object(): yield x __setattr__ = lambda x, n, v:", "key: Any, value: Any) -> Any: self._get_current_object()[key] = value def", "= lambda x, o: o // x._get_current_object() # noqa: E731", "lambda x: len(x._get_current_object()) # noqa: E731 __getitem__ = lambda x,", "= lambda x, o: x._get_current_object() > o # noqa: E731", "getattr(self._get_current_object(), name) def __setitem__(self, key: Any, value: Any) -> Any:", "len(x._get_current_object()) # noqa: E731 __getitem__ = lambda x, i: x._get_current_object()[i]", "o: x._get_current_object() | o # noqa: E731 __div__ = lambda", "__getitem__ = lambda x, i: x._get_current_object()[i] # noqa: E731 __iter__", "o # noqa: E731 __lshift__ = lambda x, o: x._get_current_object()", "lambda x: +(x._get_current_object()) # noqa: E731 __abs__ = lambda x:", "lambda x: ~(x._get_current_object()) # noqa: E731 __complex__ = lambda x:", "\"__local\", \"__wrapped__\") def __init__(self, local: Callable, name: Optional[str] = None)", "x, o: o * x._get_current_object() # noqa: E731 __rdiv__ =", "lambda x: str(x._get_current_object()) # type: ignore # noqa: E731 __lt__", "is None or stack == []: return None else: return", "# noqa: E731 __pow__ = lambda x, o: x._get_current_object() **", "def __repr__(self) -> str: try: obj = self._get_current_object() except RuntimeError:", "dir(self._get_current_object()) return getattr(self._get_current_object(), name) def __setitem__(self, key: Any, value: Any)", "o: x._get_current_object() * o # noqa: E731 __floordiv__ = lambda", "x, o: x._get_current_object() >> o # noqa: E731 __and__ =", "*a, **kw: x._get_current_object()(*a, **kw) # noqa: E731 __len__ = lambda", "__setattr__ = lambda x, n, v: setattr( # noqa: E731,", "overidden below, use the object __setattr__ object.__setattr__(self, \"__LocalProxy_local\", local) object.__setattr__(self,", "self._get_current_object()[key] async def __aiter__(self) -> Any: async for x 
in", "type: ignore # noqa: E731 __lt__ = lambda x, o:", "str) -> Any: values = self._storage.get({}) try: return values[name] except", "__aiter__(self) -> Any: async for x in self._get_current_object(): yield x", "x._get_current_object() <= o # noqa: E731 __eq__ = lambda x,", "E731 __iter__ = lambda x: iter(x._get_current_object()) # noqa: E731 __contains__", "x._get_current_object() ^ o # noqa: E731 __or__ = lambda x,", "# noqa: E731 __float__ = lambda x: float(x._get_current_object()) # noqa:", "noqa: E731 __str__ = lambda x: str(x._get_current_object()) # type: ignore", "= lambda x, o: x._get_current_object() // o # noqa: E731", "lambda x, o: x._get_current_object() | o # noqa: E731 __div__", "n, v # type: ignore ) __delattr__ = lambda x,", "# noqa: E731 __deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo)", "= self._storage.get({}) try: del values[name] self._storage.set(values) except KeyError: raise AttributeError(name)", "stack = getattr(self._task_local, \"stack\", None) if stack is None or", "noqa: E731 __oct__ = lambda x: oct(x._get_current_object()) # noqa: E731", "lambda x, o: x._get_current_object() < o # noqa: E731 __le__", "values[name] self._storage.set(values) except KeyError: raise AttributeError(name) @staticmethod def _task_identity() ->", "None) if stack is None or stack == []: return", "x._get_current_object() >> o # noqa: E731 __and__ = lambda x," ]
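A minimal usage sketch, assuming the TaskLocal, LocalStack and LocalProxy classes above; the names _request_stack, current_request and handle are made up for illustration and are not part of the module. A per-task stack is exposed through a proxy, and each asyncio task only sees its own pushes because every task runs in its own copy of the context.

import asyncio

_request_stack = LocalStack()
current_request = LocalProxy(lambda: _request_stack.top)

async def handle(name: str) -> None:
    # each task gets its own copy of the context, so pushes do not leak between tasks
    _request_stack.push({"name": name})
    try:
        # the proxy forwards __getitem__ to whatever is on top of this task's stack
        print(current_request["name"])
    finally:
        _request_stack.pop()

async def main() -> None:
    await asyncio.gather(handle("alice"), handle("bob"))

asyncio.run(main())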
import time

import h5py
import hdbscan
import numpy as np
import torch
from sklearn.cluster import MeanShift

from pytorch3dunet.datasets.hdf5 import SliceBuilder
from pytorch3dunet.unet3d.utils import get_logger
from pytorch3dunet.unet3d.utils import unpad

logger = get_logger('UNet3DPredictor')


class _AbstractPredictor:
    def __init__(self, model, loader, output_file, config, **kwargs):
        self.model = model
        self.loader = loader
        self.output_file = output_file
        self.config = config
        self.predictor_config = kwargs

    @staticmethod
    def _volume_shape(dataset):
        # TODO: support multiple internal datasets
        raw = dataset.raws[0]
        if raw.ndim == 3:
            return raw.shape
        else:
            return raw.shape[1:]

    @staticmethod
    def _get_output_dataset_names(number_of_datasets, prefix='predictions'):
        if number_of_datasets == 1:
            return [prefix]
        else:
            return [f'{prefix}{i}' for i in range(number_of_datasets)]

    def predict(self):
        raise NotImplementedError


class StandardPredictor(_AbstractPredictor):
    """
    Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
    Predictions from the network are kept in memory. If the results from the network don't fit into RAM,
    use `LazyPredictor` instead.

    The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument
    is not present in the config, 'predictions{n}' is used as a default dataset name, where `n` denotes the
    number of the output head from the network.

    Args:
        model (Unet3D): trained 3D UNet model used for prediction
        data_loader (torch.utils.data.DataLoader): input data loader
        output_file (str): path to the output H5 file
        config (dict): global config dict
    """

    def __init__(self, model, loader, output_file, config, **kwargs):
        super().__init__(model, loader, output_file, config, **kwargs)

    def predict(self):
        out_channels = self.config['model'].get('out_channels')
        if out_channels is None:
            out_channels = self.config['model']['dt_out_channels']

        prediction_channel = self.config.get('prediction_channel', None)
        if prediction_channel is not None:
            logger.info(f"Using only channel '{prediction_channel}' from the network output")

        device = self.config['device']
        output_heads = self.config['model'].get('output_heads', 1)

        logger.info(f'Running prediction on {len(self.loader)} batches...')

        # dimensionality of the output predictions
        volume_shape = self._volume_shape(self.loader.dataset)
        if prediction_channel is None:
            prediction_maps_shape = (out_channels,) + volume_shape
        else:
            # single channel prediction map
            prediction_maps_shape = (1,) + volume_shape
        logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')

        avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts', True)
        logger.info(f'Avoid block artifacts: {avoid_block_artifacts}')

        # create destination H5 file
        h5_output_file = h5py.File(self.output_file, 'w')
        # allocate prediction and normalization arrays
        logger.info('Allocating prediction and normalization arrays...')
        prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape,
                                                                              output_heads, h5_output_file)

        # Sets the module in evaluation mode explicitly (necessary for batchnorm/dropout layers if present)
        self.model.eval()
        # Set the `testing=true` flag otherwise the final Softmax/Sigmoid won't be applied!
        self.model.testing = True
        # Run predictions on the entire input dataset
        with torch.no_grad():
            for batch, indices in self.loader:
                # send batch to device
                batch = batch.to(device)

                # forward pass
                predictions = self.model(batch)

                # wrap predictions into a list if there is only one output head from the network
                if output_heads == 1:
                    predictions = [predictions]

                # for each output head
                for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,
                                                                          normalization_masks):
                    # convert to numpy array
                    prediction = prediction.cpu().numpy()

                    # for each batch sample
                    for pred, index in zip(prediction, indices):
                        # save patch index: (C,D,H,W)
                        if prediction_channel is None:
                            channel_slice = slice(0, out_channels)
                        else:
                            channel_slice = slice(0, 1)
                        index = (channel_slice,) + index

                        if prediction_channel is not None:
                            # use only the 'prediction_channel'
                            logger.info(f"Using channel '{prediction_channel}'...")
                            pred = np.expand_dims(pred[prediction_channel], axis=0)

                        logger.info(f'Saving predictions for slice:{index}...')

                        if avoid_block_artifacts:
                            # unpad in order to avoid block artifacts in the output probability maps
                            u_prediction, u_index = unpad(pred, index, volume_shape)
                            # accumulate probabilities into the output prediction array
                            prediction_map[u_index] += u_prediction
                            # count voxel visits for normalization
                            normalization_mask[u_index] += 1
                        else:
                            # accumulate probabilities into the output prediction array
                            prediction_map[index] += pred
                            # count voxel visits for normalization
                            normalization_mask[index] += 1

        # save results
        self._save_results(prediction_maps, normalization_masks, output_heads, h5_output_file, self.loader.dataset)
        # close the output H5 file
        h5_output_file.close()

    def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
        # initialize the output prediction arrays
        prediction_maps = [np.zeros(output_shape, dtype='float32') for _ in range(output_heads)]
        # initialize normalization mask in order to average out probabilities of overlapping patches
        normalization_masks = [np.zeros(output_shape, dtype='uint8') for _ in range(output_heads)]
        return prediction_maps, normalization_masks

    def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
        # save probability maps
        prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
        for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks,
                                                                          prediction_datasets):
            prediction_map = prediction_map / normalization_mask

            if dataset.mirror_padding:
                pad_width = dataset.pad_width
                logger.info(f'Dataset loaded with mirror padding, pad_width: {pad_width}. Cropping before saving...')
                prediction_map = prediction_map[:, pad_width:-pad_width, pad_width:-pad_width, pad_width:-pad_width]

            logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
            output_file.create_dataset(prediction_dataset, data=prediction_map, compression="gzip")
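The accumulate-then-normalize scheme used by StandardPredictor can be shown on a toy array; the shapes and slice indices below are made up for illustration and are not taken from the predictor.

import numpy as np

prediction_map = np.zeros((1, 4, 4, 4), dtype='float32')
normalization_mask = np.zeros((1, 4, 4, 4), dtype='uint8')

# two overlapping patches, each contributing a constant probability of 1.0
for index in [(slice(0, 1), slice(0, 3), slice(0, 4), slice(0, 4)),
              (slice(0, 1), slice(1, 4), slice(0, 4), slice(0, 4))]:
    prediction_map[index] += 1.0
    normalization_mask[index] += 1

# voxels covered by both patches are averaged back to 1.0
prediction_map = prediction_map / normalization_mask
print(np.unique(prediction_map))  # [1.]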
class LazyPredictor(StandardPredictor):
    """
    Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
    Predicted patches are directly saved into the H5 and they won't be stored in memory. Since this
    predictor is slower than the `StandardPredictor`, it should only be used when the predicted volume
    does not fit into RAM.

    The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument
    is not present in the config, 'predictions{n}' is used as a default dataset name, where `n` denotes the
    number of the output head from the network.

    Args:
        model (Unet3D): trained 3D UNet model used for prediction
        data_loader (torch.utils.data.DataLoader): input data loader
        output_file (str): path to the output H5 file
        config (dict): global config dict
    """

    def __init__(self, model, loader, output_file, config, **kwargs):
        super().__init__(model, loader, output_file, config, **kwargs)

    def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
        # allocate datasets for probability maps
        prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
        prediction_maps = [
            output_file.create_dataset(dataset_name, shape=output_shape, dtype='float32', chunks=True,
                                       compression='gzip')
            for dataset_name in prediction_datasets]

        # allocate datasets for normalization masks
        normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')
        normalization_masks = [
            output_file.create_dataset(dataset_name, shape=output_shape, dtype='uint8', chunks=True,
                                       compression='gzip')
            for dataset_name in normalization_datasets]

        return prediction_maps, normalization_masks

    def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
        if dataset.mirror_padding:
            logger.warn(
                f'Mirror padding unsupported in LazyPredictor. Output predictions will be padded with pad_width: {dataset.pad_width}')

        prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
        normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')

        # normalize the prediction_maps inside the H5
        for prediction_map, normalization_mask, prediction_dataset, normalization_dataset in zip(prediction_maps,
                                                                                                 normalization_masks,
                                                                                                 prediction_datasets,
                                                                                                 normalization_datasets):
            # split the volume into smaller patches and load each into memory separately
            logger.info(f'Normalizing {prediction_dataset}...')

            z, y, x = prediction_map.shape[1:]
            # take slices which are 1/27 of the original volume
            patch_shape = (z // 3, y // 3, x // 3)
            for index in SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape):
                logger.info(f'Normalizing slice: {index}')
                prediction_map[index] /= normalization_mask[index]
                # make sure to reset the slice that has been visited already in order to avoid 'double'
                # normalization when the patches overlap with each other
                normalization_mask[index] = 1

            logger.info(f'Deleting {normalization_dataset}...')
            del output_file[normalization_dataset]
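A small sketch of the out-of-core pattern LazyPredictor relies on: h5py datasets created with chunks=True and gzip compression can be accumulated and normalized slice by slice without holding the whole volume in RAM. The file name, shapes and slice below are illustrative only.

import h5py
import numpy as np

with h5py.File('predictions.h5', 'w') as f:
    pred = f.create_dataset('predictions', shape=(1, 64, 64, 64), dtype='float32',
                            chunks=True, compression='gzip')
    norm = f.create_dataset('normalization', shape=(1, 64, 64, 64), dtype='uint8',
                            chunks=True, compression='gzip')

    index = (slice(0, 1), slice(0, 32), slice(0, 64), slice(0, 64))
    pred[index] += np.ones((1, 32, 64, 64), dtype='float32')  # accumulate one patch on disk
    norm[index] += 1

    pred[index] /= np.maximum(norm[index], 1)  # normalize that slice in place
    del f['normalization']  # drop the mask once normalization is done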
class EmbeddingsPredictor(_AbstractPredictor):
    """
    Applies the embedding model on the given dataset and saves the result in the `output_file` in the H5 format.

    The resulting volume is the segmentation itself (not the embedding vectors), obtained by clustering the
    embeddings with the HDBSCAN or MeanShift algorithm patch by patch and then stitching the patches together.
    """

    def __init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7, noise_label=-1, **kwargs):
        super().__init__(model, loader, output_file, config, **kwargs)

        self.iou_threshold = iou_threshold
        self.noise_label = noise_label
        self.clustering = clustering

        assert clustering in ['hdbscan', 'meanshift'], 'Only HDBSCAN and MeanShift are supported'
        logger.info(f'IoU threshold: {iou_threshold}')

        self.clustering_name = clustering
        self.clustering = self._get_clustering(clustering, kwargs)

    def predict(self):
        device = self.config['device']
        output_heads = self.config['model'].get('output_heads', 1)

        logger.info(f'Running prediction on {len(self.loader)} patches...')

        # dimensionality of the output segmentation
        volume_shape = self._volume_shape(self.loader.dataset)
        logger.info(f'The shape of the output segmentation (DHW): {volume_shape}')

        logger.info('Allocating segmentation array...')
        # initialize the output prediction arrays
        output_segmentations = [np.zeros(volume_shape, dtype='int32') for _ in range(output_heads)]
        # initialize visited_voxels arrays
        visited_voxels_arrays = [np.zeros(volume_shape, dtype='uint8') for _ in range(output_heads)]

        # Sets the module in evaluation mode explicitly
        self.model.eval()
        self.model.testing = True
        # Run predictions on the entire input dataset
        with torch.no_grad():
            for batch, indices in self.loader:
                # logger.info(f'Predicting embeddings for slice:{index}')

                # send batch to device
                batch = batch.to(device)
                # forward pass
                embeddings = self.model(batch)

                # wrap predictions into a list if there is only one output head from the network
                if output_heads == 1:
                    embeddings = [embeddings]

                for prediction, output_segmentation, visited_voxels_array in zip(embeddings, output_segmentations,
                                                                                 visited_voxels_arrays):
                    # convert to numpy array
                    prediction = prediction.cpu().numpy()

                    # iterate sequentially because of the current simple stitching that we're using
                    for pred, index in zip(prediction, indices):
                        # convert embeddings to segmentation with hdbscan clustering
                        segmentation = self._embeddings_to_segmentation(pred)
                        # stitch patches
                        self._merge_segmentation(segmentation, index, output_segmentation, visited_voxels_array)

        # save results
        with h5py.File(self.output_file, 'w') as output_file:
            prediction_datasets = self._get_output_dataset_names(output_heads,
                                                                 prefix=f'segmentation/{self.clustering_name}')
            for output_segmentation, prediction_dataset in zip(output_segmentations, prediction_datasets):
                logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
                output_file.create_dataset(prediction_dataset, data=output_segmentation, compression="gzip")

    def _embeddings_to_segmentation(self, embeddings):
        """
        Cluster embeddings vectors with HDBSCAN and return the segmented volume.

        Args:
            embeddings (ndarray): 4D (CDHW) embeddings tensor
        Returns:
            3D (DHW) segmentation
        """
        # shape of the output segmentation
        output_shape = embeddings.shape[1:]
        # reshape (C, D, H, W) -> (C, D * H * W) and transpose -> (D * H * W, C)
        flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose()

        logger.info('Clustering embeddings...')
        # perform clustering and reshape in order to get the segmentation volume
        start = time.time()
        clusters = self.clustering.fit_predict(flattened_embeddings).reshape(output_shape)
        logger.info(
            f'Number of clusters found by {self.clustering}: {np.max(clusters)}. Duration: {time.time() - start} sec.')
        return clusters

    def _merge_segmentation(self, segmentation, index, output_segmentation, visited_voxels_array):
        """
        Given the `segmentation` patch, its `index` in the `output_segmentation` array and the array of visited
        voxels, merge the segmented patch (`segmentation`) into the `output_segmentation`.

        Args:
            segmentation (ndarray): segmented patch
            index (tuple): position of the patch inside `output_segmentation` volume
            output_segmentation (ndarray): current state of the output segmentation
            visited_voxels_array (ndarray): array of voxels visited so far (same size as `output_segmentation`);
                visited voxels will be marked by a number greater than 0
        """
        index = tuple(index)
        # get new unassigned label
        max_label = np.max(output_segmentation) + 1
        # make sure there are no clashes between current segmentation patch and the output_segmentation
        # but keep the noise label
        noise_mask = segmentation == self.noise_label
        segmentation += int(max_label)
        segmentation[noise_mask] = self.noise_label
        # get the overlap mask in the current patch
        overlap_mask = visited_voxels_array[index] > 0
        # get the new labels
        new_labels = np.unique(segmentation[overlap_mask])
        merged_labels = self._merge_labels(output_segmentation[index], new_labels, segmentation)
        # relabel new segmentation with the merged labels
        for current_label, new_label in merged_labels:
            segmentation[segmentation == new_label] = current_label
        # update the output_segmentation
        output_segmentation[index] = segmentation
        # visit the patch
        visited_voxels_array[index] += 1

    def _merge_labels(self, current_segmentation, new_labels, new_segmentation):
        def _most_frequent_label(labels):
            unique, counts = np.unique(labels, return_counts=True)
            ind = np.argmax(counts)
            return unique[ind]

        result = []
        # iterate over new_labels and merge regions if the IoU exceeds a given threshold
        for new_label in new_labels:
            # skip 'noise' label assigned by hdbscan
            if new_label == self.noise_label:
                continue
            new_label_mask = new_segmentation == new_label
            # get only the most frequent overlapping label
            most_frequent_label = _most_frequent_label(current_segmentation[new_label_mask])
            # skip 'noise' label
            if most_frequent_label == self.noise_label:
                continue
            current_label_mask = current_segmentation == most_frequent_label
            # compute Jaccard index
            iou = np.bitwise_and(new_label_mask, current_label_mask).sum() / np.bitwise_or(new_label_mask,
                                                                                           current_label_mask).sum()
            if iou > self.iou_threshold:
                # merge labels
                result.append((most_frequent_label, new_label))

        return result

    def _get_clustering(self, clustering_alg, kwargs):
        logger.info(f'Using {clustering_alg} for clustering')

        if clustering_alg == 'hdbscan':
            min_cluster_size = kwargs.get('min_cluster_size', 50)
            min_samples = kwargs.get('min_samples', None)
            metric = kwargs.get('metric', 'euclidean')
            cluster_selection_method = kwargs.get('cluster_selection_method', 'eom')

            logger.info(f'HDBSCAN params: min_cluster_size: {min_cluster_size}, min_samples: {min_samples}')
            return hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric,
                                   cluster_selection_method=cluster_selection_method)
        else:
            bandwidth = kwargs['bandwidth']
            logger.info(f'MeanShift params: bandwidth: {bandwidth}, bin_seeding: True')
            # use fast MeanShift with bin seeding
            return MeanShift(bandwidth=bandwidth, bin_seeding=True)
SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape): logger.info(f'Normalizing", "merged_labels = self._merge_labels(output_segmentation[index], new_labels, segmentation) # relabel new segmentation with", "zip(prediction_maps, normalization_masks, prediction_datasets, normalization_datasets): # split the volume into 4", "# for each batch sample for pred, index in zip(prediction,", "volume_shape = self._volume_shape(self.loader.dataset) logger.info(f'The shape of the output segmentation (DHW):", "kwargs.get('min_cluster_size', 50) min_samples = kwargs.get('min_samples', None), metric = kwargs.get('metric', 'euclidean')", "{prediction_dataset}...') z, y, x = prediction_map.shape[1:] # take slices which", "(z // 3, y // 3, x // 3) for", "most_frequent_label == self.noise_label: continue current_label_mask = current_segmentation == most_frequent_label #", "H5 file config (dict): global config dict \"\"\" def __init__(self,", "entire input dataset with torch.no_grad(): for batch, indices in self.loader:", "# make sure to reset the slice that has been", "If the results from the network don't fit in into", "for pred, index in zip(prediction, indices): # save patch index:", "raw.shape else: return raw.shape[1:] @staticmethod def _get_output_dataset_names(number_of_datasets, prefix='predictions'): if number_of_datasets", "is not None: # use only the 'prediction_channel' logger.info(f\"Using channel", "list if there is only one output head from the", "sec.') return clusters def _merge_segmentation(self, segmentation, index, output_segmentation, visited_voxels_array): \"\"\"", "masks normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization') normalization_masks = [ output_file.create_dataset(dataset_name, shape=output_shape,", "[np.zeros(volume_shape, dtype='uint8') for _ in range(output_heads)] # Sets the module", "return hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric, cluster_selection_method=cluster_selection_method) else: bandwidth = kwargs['bandwidth'] logger.info(f'MeanShift", "(ndarray): 4D (CDHW) embeddings tensor Returns: 3D (DHW) segmentation \"\"\"", "else: # accumulate probabilities into the output prediction array prediction_map[index]", "with HDBSCAN and return the segmented volume. Args: embeddings (ndarray):", "self.config = config self.predictor_config = kwargs @staticmethod def _volume_shape(dataset): #", "visited so far (same size as `output_segmentation`); visited voxels will", "normalization_mask in zip(predictions, prediction_maps, normalization_masks): # convert to numpy array", "{output_file}/{prediction_dataset}...') output_file.create_dataset(prediction_dataset, data=prediction_map, compression=\"gzip\") class LazyPredictor(StandardPredictor): \"\"\" Applies the model", "logger.info(f'Normalizing {prediction_dataset}...') z, y, x = prediction_map.shape[1:] # take slices", "prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions') prediction_maps = [ output_file.create_dataset(dataset_name, shape=output_shape, dtype='float32',", "volume into 4 parts and load each into the memory", "metric = kwargs.get('metric', 'euclidean') cluster_selection_method = kwargs.get('cluster_selection_method', 'eom') logger.info(f'HDBSCAN params:", "# forward pass predictions = self.model(batch) # wrap predictions into", "_save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset): # save probability maps", "volume. 
Args: embeddings (ndarray): 4D (CDHW) embeddings tensor Returns: 3D", "in evaluation mode explicitly (necessary for batchnorm/dropout layers if present)", "dimensionality of the the output predictions volume_shape = self._volume_shape(self.loader.dataset) if", "batch to device batch = batch.to(device) # forward pass embeddings", "the overlap mask in the current patch overlap_mask = visited_voxels_array[index]", "than the `StandardPredictor` it should only be used when the", "network if output_heads == 1: embeddings = [embeddings] for prediction,", "if prediction_channel is not None: # use only the 'prediction_channel'", "dtype='uint8', chunks=True, compression='gzip') for dataset_name in normalization_datasets] return prediction_maps, normalization_masks", "be marked by a number greater than 0 \"\"\" index", "1 # save results to self._save_results(prediction_maps, normalization_masks, output_heads, h5_output_file, self.loader.dataset)", "output_file, config, **kwargs): super().__init__(model, loader, output_file, config, **kwargs) def _allocate_prediction_maps(self,", "= kwargs.get('min_samples', None), metric = kwargs.get('metric', 'euclidean') cluster_selection_method = kwargs.get('cluster_selection_method',", "prefix='normalization') # normalize the prediction_maps inside the H5 for prediction_map,", "split the volume into 4 parts and load each into", "if number_of_datasets == 1: return [prefix] else: return [f'{prefix}{i}' for", "hdbscan if new_label == self.noise_label: continue new_label_mask = new_segmentation ==", "maps u_prediction, u_index = unpad(pred, index, volume_shape) # accumulate probabilities", "slice: {index}') prediction_map[index] /= normalization_mask[index] # make sure to reset", "new_label # get only the most frequent overlapping label most_frequent_label", "for index in SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape): logger.info(f'Normalizing slice: {index}') prediction_map[index]", "clustering_alg, kwargs): logger.info(f'Using {clustering_alg} for clustering') if clustering_alg == 'hdbscan':", "= [predictions] # for each output head for prediction, prediction_map,", "_most_frequent_label(current_segmentation[new_label_mask]) # skip 'noise' label if most_frequent_label == self.noise_label: continue", "# merge labels result.append((most_frequent_label, new_label)) return result def _get_clustering(self, clustering_alg,", "= np.unique(labels, return_counts=True) ind = np.argmax(counts) return unique[ind] result =", "the volume into 4 parts and load each into the", "_ in range(output_heads)] # initialize normalization mask in order to", "in zip(prediction_maps, normalization_masks, prediction_datasets): prediction_map = prediction_map / normalization_mask if", "1: return [prefix] else: return [f'{prefix}{i}' for i in range(number_of_datasets)]", "prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions') for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps,", "and normalization arrays logger.info('Allocating prediction and normalization arrays...') prediction_maps, normalization_masks", "block artifacts: {avoid_block_artifacts}') # create destination H5 file h5_output_file =", "cluster_selection_method = kwargs.get('cluster_selection_method', 'eom') logger.info(f'HDBSCAN params: min_cluster_size: {min_cluster_size}, min_samples: {min_samples}')", "output prediction arrays output_segmentations = [np.zeros(volume_shape, dtype='int32') for _ in", "global config dict 
\"\"\" def __init__(self, model, loader, output_file, config,", "normalization_masks = [ output_file.create_dataset(dataset_name, shape=output_shape, dtype='uint8', chunks=True, compression='gzip') for dataset_name", "get_logger('UNet3DPredictor') class _AbstractPredictor: def __init__(self, model, loader, output_file, config, **kwargs):", "'noise' label if most_frequent_label == self.noise_label: continue current_label_mask = current_segmentation", "self.output_file = output_file self.config = config self.predictor_config = kwargs @staticmethod", "= [np.zeros(volume_shape, dtype='uint8') for _ in range(output_heads)] # Sets the", "logger.info(f'Predicting embeddings for slice:{index}') # send batch to device batch", "this predictor is slower than the `StandardPredictor` it should only", "Set the `testing=true` flag otherwise the final Softmax/Sigmoid won't be", "Args: segmentation (ndarray): segmented patch index (tuple): position of the", "merged labels for current_label, new_label in merged_labels: segmentation[segmentation == new_label]", "-> (D * H * W, C) flattened_embeddings = embeddings.reshape(embeddings.shape[0],", "1) logger.info(f'Running prediction on {len(self.loader)} patches...') # dimensionality of the", "(not the embedding vectors) obtained by clustering embeddings with HDBSCAN", "None: logger.info(f\"Using only channel '{prediction_channel}' from the network output\") device", "[prefix] else: return [f'{prefix}{i}' for i in range(number_of_datasets)] def predict(self):", "the output segmentation (DHW): {volume_shape}') logger.info('Allocating segmentation array...') # initialize", "if dataset.mirror_padding: pad_width = dataset.pad_width logger.info(f'Dataset loaded with mirror padding,", "current state of the output segmentation visited_voxels_array (ndarray): array of", "normalization normalization_mask[index] += 1 # save results to self._save_results(prediction_maps, normalization_masks,", "indices in self.loader: # logger.info(f'Predicting embeddings for slice:{index}') # send", "# get the new labels inside the overlap_mask new_labels =", "\"\"\" Given the `segmentation` patch, its `index` in the `output_segmentation`", "min_samples = kwargs.get('min_samples', None), metric = kwargs.get('metric', 'euclidean') cluster_selection_method =", "prediction and normalization arrays...') prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape, output_heads, h5_output_file)", "device = self.config['device'] output_heads = self.config['model'].get('output_heads', 1) logger.info(f'Running prediction on", "_allocate_prediction_maps(self, output_shape, output_heads, output_file): # allocate datasets for probability maps", "if new_label == self.noise_label: continue new_label_mask = new_segmentation == new_label", "for each output head for prediction, prediction_map, normalization_mask in zip(predictions,", "in SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape): logger.info(f'Normalizing slice: {index}') prediction_map[index] /= normalization_mask[index]", "module in evaluation mode explicitly (necessary for batchnorm/dropout layers if", "output_file, dataset): if dataset.mirror_padding: logger.warn( f'Mirror padding unsupported in LazyPredictor.", "volume is the segmentation itself (not the embedding vectors) obtained", "data loader output_file (str): path to the output H5 file", "out probabilities of overlapping patches normalization_masks = [np.zeros(output_shape, dtype='uint8') for", "4 parts and 
load each into the memory separately logger.info(f'Normalizing", "class _AbstractPredictor: def __init__(self, model, loader, output_file, config, **kwargs): self.model", "results from the network don't fit in into RAM use", "original volume patch_shape = (z // 3, y // 3,", "the new labels inside the overlap_mask new_labels = np.unique(segmentation[overlap_mask]) merged_labels", "{pad_width}. Cropping before saving...') prediction_map = prediction_map[:, pad_width:-pad_width, pad_width:-pad_width, pad_width:-pad_width]", "= segmentation # visit the patch visited_voxels_array[index] += 1 def", "final Softmax/Sigmoid won't be applied! self.model.testing = True # Run", "segmentation itself (not the embedding vectors) obtained by clustering embeddings", "name, where `n` denotes the number of the output head", "logger.info('Allocating segmentation array...') # initialize the output prediction arrays output_segmentations", "- start} sec.') return clusters def _merge_segmentation(self, segmentation, index, output_segmentation,", "# initialize the output prediction arrays prediction_maps = [np.zeros(output_shape, dtype='float32')", "of clusters found by {self.clustering}: {np.max(clusters)}. Duration: {time.time() - start}", "array...') # initialize the output prediction arrays output_segmentations = [np.zeros(volume_shape,", "the slice that has been visited already in order to", "segmentation = self._embeddings_to_segmentation(pred) # stitch patches self._merge_segmentation(segmentation, index, output_segmentation, visited_voxels_array)", "# make sure there are no clashes between current segmentation", "# wrap predictions into a list if there is only", "channel '{prediction_channel}'...\") pred = np.expand_dims(pred[prediction_channel], axis=0) logger.info(f'Saving predictions for slice:{index}...')", "are directly saved into the H5 and they won't be", "they won't be stored in memory. 
Since this predictor is", "prediction_maps = [np.zeros(output_shape, dtype='float32') for _ in range(output_heads)] # initialize", "logger.info('Clustering embeddings...') # perform clustering and reshape in order to", "a list if there is only one output head from", "is used as a default dataset name, where `n` denotes", "for dataset_name in prediction_datasets] # allocate datasets for normalization masks", "as a default dataset name, where `n` denotes the number", "batch sample for pred, index in zip(prediction, indices): # save", "mask in the current patch overlap_mask = visited_voxels_array[index] > 0", "each output head for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,", "(DHW): {volume_shape}') logger.info('Allocating segmentation array...') # initialize the output prediction", "get_logger from pytorch3dunet.unet3d.utils import unpad logger = get_logger('UNet3DPredictor') class _AbstractPredictor:", "batch.to(device) # forward pass embeddings = self.model(batch) # wrap predictions", "normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization') # normalize the prediction_maps inside the", "visited_voxels_array (ndarray): array of voxels visited so far (same size", "# convert to numpy array prediction = prediction.cpu().numpy() # for", "+= u_prediction # count voxel visits for normalization normalization_mask[u_index] +=", "prediction_dataset in zip(prediction_maps, normalization_masks, prediction_datasets): prediction_map = prediction_map / normalization_mask", "np import torch from sklearn.cluster import MeanShift from pytorch3dunet.datasets.hdf5 import", "shape of the output segmentation output_shape = embeddings.shape[1:] # reshape", "for normalization normalization_mask[index] += 1 # save results to self._save_results(prediction_maps,", "+= pred # count voxel visits for normalization normalization_mask[index] +=", "config, clustering, iou_threshold=0.7, noise_label=-1, **kwargs): super().__init__(model, loader, output_file, config, **kwargs)", "normalization_mask[index] # make sure to reset the slice that has", "> 0 # get the new labels inside the overlap_mask", "mode explicitly self.model.eval() self.model.testing = True # Run predictions on", "normalization_mask[u_index] += 1 else: # accumulate probabilities into the output", "class StandardPredictor(_AbstractPredictor): \"\"\" Applies the model on the given dataset", "logger.info('Allocating prediction and normalization arrays...') prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape, output_heads,", "hdbscan clustering segmentation = self._embeddings_to_segmentation(pred) # stitch patches self._merge_segmentation(segmentation, index,", "of the output prediction maps (CDHW): {prediction_maps_shape}') avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts',", "params: bandwidth: {bandwidth}, bin_seeding: True') # use fast MeanShift with", "Cropping before saving...') prediction_map = prediction_map[:, pad_width:-pad_width, pad_width:-pad_width, pad_width:-pad_width] logger.info(f'Saving", "# shape of the output segmentation output_shape = embeddings.shape[1:] #", "= [np.zeros(output_shape, dtype='float32') for _ in range(output_heads)] # initialize normalization", "logger.info(f'Using {clustering_alg} for clustering') if clustering_alg == 'hdbscan': min_cluster_size =", "to average out probabilities of overlapping patches normalization_masks = [np.zeros(output_shape,", "with each other 
normalization_mask[index] = 1 logger.info(f'Deleting {normalization_dataset}...') del output_file[normalization_dataset]", "'double' normalization # when the patches overlap with each other", "and the output_segmentation # but keep the noise label noise_mask", "-1).transpose() logger.info('Clustering embeddings...') # perform clustering and reshape in order", "self.model.eval() self.model.testing = True # Run predictions on the entire", "array prediction = prediction.cpu().numpy() # iterate sequentially because of the", "head from the network if output_heads == 1: predictions =", "prediction map prediction_maps_shape = (1,) + volume_shape logger.info(f'The shape of", "= config self.predictor_config = kwargs @staticmethod def _volume_shape(dataset): # TODO:", "# initialize the output prediction arrays output_segmentations = [np.zeros(volume_shape, dtype='int32')", "= True # Run predictions on the entire input dataset", "been visited already in order to avoid 'double' normalization #", "== new_label] = current_label # update the output_segmentation output_segmentation[index] =", "== new_label # get only the most frequent overlapping label", "normalization_mask[index] = 1 logger.info(f'Deleting {normalization_dataset}...') del output_file[normalization_dataset] class EmbeddingsPredictor(_AbstractPredictor): \"\"\"", "kwargs.get('metric', 'euclidean') cluster_selection_method = kwargs.get('cluster_selection_method', 'eom') logger.info(f'HDBSCAN params: min_cluster_size: {min_cluster_size},", "[embeddings] for prediction, output_segmentation, visited_voxels_array in zip(embeddings, output_segmentations, visited_voxels_arrays): #", "segmentation volume start = time.time() clusters = self.clustering.fit_predict(flattened_embeddings).reshape(output_shape) logger.info( f'Number", "visited already in order to avoid 'double' normalization # when", "segmentation \"\"\" # shape of the output segmentation output_shape =", "= np.bitwise_and(new_label_mask, current_label_mask).sum() / np.bitwise_or(new_label_mask, current_label_mask).sum() if iou > self.iou_threshold:", "slice:{index}...') if avoid_block_artifacts: # unpad in order to avoid block", "label max_label = np.max(output_segmentation) + 1 # make sure there", "0 # get the new labels inside the overlap_mask new_labels", "_most_frequent_label(labels): unique, counts = np.unique(labels, return_counts=True) ind = np.argmax(counts) return", "== 'hdbscan': min_cluster_size = kwargs.get('min_cluster_size', 50) min_samples = kwargs.get('min_samples', None),", "visits for normalization normalization_mask[index] += 1 # save results to", "to device batch = batch.to(device) # forward pass predictions =", "in range(output_heads)] # Sets the module in evaluation mode explicitly", "return unique[ind] result = [] # iterate over new_labels and", "results with h5py.File(self.output_file, 'w') as output_file: prediction_datasets = self._get_output_dataset_names(output_heads, prefix=f'segmentation/{self.clustering_name}')", "device batch = batch.to(device) # forward pass predictions = self.model(batch)", "data=output_segmentation, compression=\"gzip\") def _embeddings_to_segmentation(self, embeddings): \"\"\" Cluster embeddings vectors with", "H5 is given by `des_dataset_name` config argument. If the argument", "self.noise_label: continue current_label_mask = current_segmentation == most_frequent_label # compute Jaccard", "padding, pad_width: {pad_width}. 
Cropping before saving...') prediction_map = prediction_map[:, pad_width:-pad_width,", "= self.config['model']['dt_out_channels'] prediction_channel = self.config.get('prediction_channel', None) if prediction_channel is not", "prediction arrays output_segmentations = [np.zeros(volume_shape, dtype='int32') for _ in range(output_heads)]", "flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose() logger.info('Clustering embeddings...') # perform clustering and", "from the network are kept in memory. If the results", "loader self.output_file = output_file self.config = config self.predictor_config = kwargs", "{self.clustering}: {np.max(clusters)}. Duration: {time.time() - start} sec.') return clusters def", "of the output segmentation (DHW): {volume_shape}') logger.info('Allocating segmentation array...') #", "embeddings = self.model(batch) # wrap predictions into a list if", "True') # use fast MeanShift with bin seeding return MeanShift(bandwidth=bandwidth,", "batch, indices in self.loader: # send batch to device batch", "LazyPredictor. Output predictions will be padded with pad_width: {dataset.pad_width}') prediction_datasets", "segmentation array...') # initialize the output prediction arrays output_segmentations =", "current_label, new_label in merged_labels: segmentation[segmentation == new_label] = current_label #", "normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks, prediction_datasets): prediction_map = prediction_map /", "= kwargs.get('min_cluster_size', 50) min_samples = kwargs.get('min_samples', None), metric = kwargs.get('metric',", "output segmentation output_shape = embeddings.shape[1:] # reshape (C, D, H,", "def _get_clustering(self, clustering_alg, kwargs): logger.info(f'Using {clustering_alg} for clustering') if clustering_alg", "in ['hdbscan', 'meanshift'], 'Only HDBSCAN and MeanShift are supported' logger.info(f'IoU", "if output_heads == 1: predictions = [predictions] # for each", "is not present in the config 'predictions{n}' is used as", "from the network don't fit in into RAM use `LazyPredictor`", "block artifacts in the output probability maps u_prediction, u_index =", "save patch index: (C,D,H,W) if prediction_channel is None: channel_slice =", "index in zip(prediction, indices): # convert embeddings to segmentation with", "for normalization masks normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization') normalization_masks = [", "self.model(batch) # wrap predictions into a list if there is", "output prediction array prediction_map[u_index] += u_prediction # count voxel visits", "**kwargs) def predict(self): out_channels = self.config['model'].get('out_channels') if out_channels is None:", "new_labels and merge regions if the IoU exceeds a given", "normalization arrays...') prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape, output_heads, h5_output_file) # Sets", "data=prediction_map, compression=\"gzip\") class LazyPredictor(StandardPredictor): \"\"\" Applies the model on the", "= prediction_map.shape[1:] # take slices which are 1/27 of the", "def __init__(self, model, loader, output_file, config, **kwargs): self.model = model", "C) flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose() logger.info('Clustering embeddings...') # perform clustering", "make sure there are no clashes between current segmentation patch", "segmentation[segmentation == new_label] = current_label # update the output_segmentation 
output_segmentation[index]", "super().__init__(model, loader, output_file, config, **kwargs) def predict(self): out_channels = self.config['model'].get('out_channels')", "new_label] = current_label # update the output_segmentation output_segmentation[index] = segmentation", "def _allocate_prediction_maps(self, output_shape, output_heads, output_file): # initialize the output prediction", "dataset name, where `n` denotes the number of the output", "default dataset name, where `n` denotes the number of the", "the `output_file` in the H5 format. Predicted patches are directly", "in range(number_of_datasets)] def predict(self): raise NotImplementedError class StandardPredictor(_AbstractPredictor): \"\"\" Applies", "into a list if there is only one output head", "patch_shape=patch_shape, stride_shape=patch_shape): logger.info(f'Normalizing slice: {index}') prediction_map[index] /= normalization_mask[index] # make", "= self._get_output_dataset_names(output_heads, prefix='predictions') for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks,", "prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps, normalization_masks): # convert to", "patches overlap with each other normalization_mask[index] = 1 logger.info(f'Deleting {normalization_dataset}...')", "+ volume_shape logger.info(f'The shape of the output prediction maps (CDHW):", "Sets the module in evaluation mode explicitly (necessary for batchnorm/dropout", "self.config.get('prediction_channel', None) if prediction_channel is not None: logger.info(f\"Using only channel", "output head from the network. Args: model (Unet3D): trained 3D", "as output_file: prediction_datasets = self._get_output_dataset_names(output_heads, prefix=f'segmentation/{self.clustering_name}') for output_segmentation, prediction_dataset in", "the entire input dataset with torch.no_grad(): for batch, indices in", "{len(self.loader)} patches...') # dimensionality of the the output segmentation volume_shape", "to: {output_file}/{prediction_dataset}...') output_file.create_dataset(prediction_dataset, data=prediction_map, compression=\"gzip\") class LazyPredictor(StandardPredictor): \"\"\" Applies the", "segmentation += int(max_label) segmentation[noise_mask] = self.noise_label # get the overlap", "perform clustering and reshape in order to get the segmentation", "logger.warn( f'Mirror padding unsupported in LazyPredictor. 
Output predictions will be", "Args: model (Unet3D): trained 3D UNet model used for prediction", "maps (CDHW): {prediction_maps_shape}') avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts', True) logger.info(f'Avoid block artifacts:", "= self._get_output_dataset_names(output_heads, prefix='predictions') normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization') # normalize the", "**kwargs): self.model = model self.loader = loader self.output_file = output_file", "reshape in order to get the segmentation volume start =", "pytorch3dunet.unet3d.utils import unpad logger = get_logger('UNet3DPredictor') class _AbstractPredictor: def __init__(self,", "{min_cluster_size}, min_samples: {min_samples}') return hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric, cluster_selection_method=cluster_selection_method) else: bandwidth", "# logger.info(f'Predicting embeddings for slice:{index}') # send batch to device", "output head from the network if output_heads == 1: embeddings", "and merge regions if the IoU exceeds a given threshold", "__init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7, noise_label=-1, **kwargs): super().__init__(model,", "in normalization_datasets] return prediction_maps, normalization_masks def _save_results(self, prediction_maps, normalization_masks, output_heads,", "not None: # use only the 'prediction_channel' logger.info(f\"Using channel '{prediction_channel}'...\")", "flag otherwise the final Softmax/Sigmoid won't be applied! self.model.testing =", "== 1: return [prefix] else: return [f'{prefix}{i}' for i in", "output_shape, output_heads, output_file): # initialize the output prediction arrays prediction_maps", "predictions = [predictions] # for each output head for prediction,", "self._volume_shape(self.loader.dataset) if prediction_channel is None: prediction_maps_shape = (out_channels,) + volume_shape", "indices in self.loader: # send batch to device batch =", "u_prediction, u_index = unpad(pred, index, volume_shape) # accumulate probabilities into", "class LazyPredictor(StandardPredictor): \"\"\" Applies the model on the given dataset", "[ output_file.create_dataset(dataset_name, shape=output_shape, dtype='float32', chunks=True, compression='gzip') for dataset_name in prediction_datasets]", "overlapping patches normalization_masks = [np.zeros(output_shape, dtype='uint8') for _ in range(output_heads)]", "self._get_output_dataset_names(output_heads, prefix='predictions') for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks, prediction_datasets):", "# save patch index: (C,D,H,W) if prediction_channel is None: channel_slice", "stored in memory. Since this predictor is slower than the", "forward pass predictions = self.model(batch) # wrap predictions into a", "given by `des_dataset_name` config argument. If the argument is not", "visited_voxels arrays visited_voxels_arrays = [np.zeros(volume_shape, dtype='uint8') for _ in range(output_heads)]", "# get only the most frequent overlapping label most_frequent_label =", "= self._get_output_dataset_names(output_heads, prefix='normalization') normalization_masks = [ output_file.create_dataset(dataset_name, shape=output_shape, dtype='uint8', chunks=True,", "the predicted volume does not fit into RAM. 
The output", "output_segmentation, visited_voxels_array in zip(embeddings, output_segmentations, visited_voxels_arrays): # convert to numpy", "normalization_masks, output_heads, output_file, dataset): # save probability maps prediction_datasets =", "SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape): logger.info(f'Normalizing slice: {index}') prediction_map[index] /= normalization_mask[index] #", "visited voxels will be marked by a number greater than", "instead. The output dataset names inside the H5 is given", "range(output_heads)] # Sets the module in evaluation mode explicitly self.model.eval()", "= self._embeddings_to_segmentation(pred) # stitch patches self._merge_segmentation(segmentation, index, output_segmentation, visited_voxels_array) #", "trained 3D UNet model used for prediction data_loader (torch.utils.data.DataLoader): input", "in prediction_datasets] # allocate datasets for normalization masks normalization_datasets =", "to device batch = batch.to(device) # forward pass embeddings =", "prediction_channel is not None: logger.info(f\"Using only channel '{prediction_channel}' from the", "not present in the config 'predictions{n}' is used as a", "array prediction_map[u_index] += u_prediction # count voxel visits for normalization", "# save results with h5py.File(self.output_file, 'w') as output_file: prediction_datasets =", "# get the overlap mask in the current patch overlap_mask", "model self.loader = loader self.output_file = output_file self.config = config", "of the the output predictions volume_shape = self._volume_shape(self.loader.dataset) if prediction_channel", "don't fit in into RAM use `LazyPredictor` instead. The output", "= [ output_file.create_dataset(dataset_name, shape=output_shape, dtype='uint8', chunks=True, compression='gzip') for dataset_name in", "index in zip(prediction, indices): # save patch index: (C,D,H,W) if", "index, output_segmentation, visited_voxels_array) # save results with h5py.File(self.output_file, 'w') as", "when the patches overlap with each other normalization_mask[index] = 1", "= 1 logger.info(f'Deleting {normalization_dataset}...') del output_file[normalization_dataset] class EmbeddingsPredictor(_AbstractPredictor): \"\"\" Applies", "== self.noise_label segmentation += int(max_label) segmentation[noise_mask] = self.noise_label # get", "clusters found by {self.clustering}: {np.max(clusters)}. 
Duration: {time.time() - start} sec.')", "Applies the embedding model on the given dataset and saves", "= self.model(batch) # wrap predictions into a list if there", "obtained by clustering embeddings with HDBSCAN or MeanShift algorithm patch", "numpy array prediction = prediction.cpu().numpy() # iterate sequentially because of", "current_label_mask = current_segmentation == most_frequent_label # compute Jaccard index iou", "dtype='int32') for _ in range(output_heads)] # initialize visited_voxels arrays visited_voxels_arrays", "segmentation == self.noise_label segmentation += int(max_label) segmentation[noise_mask] = self.noise_label #", "take slices which are 1/27 of the original volume patch_shape", "the IoU exceeds a given threshold for new_label in new_labels:", "datasets for normalization masks normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization') normalization_masks =", "segmentation (ndarray): segmented patch index (tuple): position of the patch", "None: channel_slice = slice(0, out_channels) else: channel_slice = slice(0, 1)", "use only the 'prediction_channel' logger.info(f\"Using channel '{prediction_channel}'...\") pred = np.expand_dims(pred[prediction_channel],", "to avoid 'double' normalization # when the patches overlap with", "index iou = np.bitwise_and(new_label_mask, current_label_mask).sum() / np.bitwise_or(new_label_mask, current_label_mask).sum() if iou", "= slice(0, out_channels) else: channel_slice = slice(0, 1) index =", "`output_file` in the H5 format. Predictions from the network are", "with torch.no_grad(): for batch, indices in self.loader: # logger.info(f'Predicting embeddings", "self.model.eval() # Set the `testing=true` flag otherwise the final Softmax/Sigmoid", "def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset): if dataset.mirror_padding: logger.warn(", "far (same size as `output_segmentation`); visited voxels will be marked", "threshold: {iou_threshold}') self.clustering_name = clustering self.clustering = self._get_clustering(clustering, kwargs) def", "datasets for probability maps prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions') prediction_maps =", "prefix='predictions') normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization') # normalize the prediction_maps inside", "saving...') prediction_map = prediction_map[:, pad_width:-pad_width, pad_width:-pad_width, pad_width:-pad_width] logger.info(f'Saving predictions to:", "make sure to reset the slice that has been visited", "the network. Args: model (Unet3D): trained 3D UNet model used", "def _embeddings_to_segmentation(self, embeddings): \"\"\" Cluster embeddings vectors with HDBSCAN and", "output_heads = self.config['model'].get('output_heads', 1) logger.info(f'Running prediction on {len(self.loader)} batches...') #", "if dataset.mirror_padding: logger.warn( f'Mirror padding unsupported in LazyPredictor. 
Output predictions", "names inside the H5 is given by `des_dataset_name` config argument.", "than 0 \"\"\" index = tuple(index) # get new unassigned", "_merge_labels(self, current_segmentation, new_labels, new_segmentation): def _most_frequent_label(labels): unique, counts = np.unique(labels,", "zip(prediction_maps, normalization_masks, prediction_datasets): prediction_map = prediction_map / normalization_mask if dataset.mirror_padding:", "new_segmentation): def _most_frequent_label(labels): unique, counts = np.unique(labels, return_counts=True) ind =", "prediction_maps_shape = (1,) + volume_shape logger.info(f'The shape of the output", "output dataset names inside the H5 is given by `des_dataset_name`", "state of the output segmentation visited_voxels_array (ndarray): array of voxels", "mask in order to average out probabilities of overlapping patches", "unique, counts = np.unique(labels, return_counts=True) ind = np.argmax(counts) return unique[ind]", "which are 1/27 of the original volume patch_shape = (z", "self.predictor_config = kwargs @staticmethod def _volume_shape(dataset): # TODO: support multiple", "segmentation, index, output_segmentation, visited_voxels_array): \"\"\" Given the `segmentation` patch, its", "the `testing=true` flag otherwise the final Softmax/Sigmoid won't be applied!", "prediction_map[index] /= normalization_mask[index] # make sure to reset the slice", "H * W, C) flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose() logger.info('Clustering embeddings...')", "__init__(self, model, loader, output_file, config, **kwargs): self.model = model self.loader", "output prediction arrays prediction_maps = [np.zeros(output_shape, dtype='float32') for _ in", "loader output_file (str): path to the output H5 file config", "start} sec.') return clusters def _merge_segmentation(self, segmentation, index, output_segmentation, visited_voxels_array):", "a default dataset name, where `n` denotes the number of", "= kwargs['bandwidth'] logger.info(f'MeanShift params: bandwidth: {bandwidth}, bin_seeding: True') # use", "y // 3, x // 3) for index in SliceBuilder._build_slices(prediction_map,", "1/27 of the original volume patch_shape = (z // 3,", "range(output_heads)] # initialize visited_voxels arrays visited_voxels_arrays = [np.zeros(volume_shape, dtype='uint8') for", "self._volume_shape(self.loader.dataset) logger.info(f'The shape of the output segmentation (DHW): {volume_shape}') logger.info('Allocating", "zip(output_segmentations, prediction_datasets): logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...') output_file.create_dataset(prediction_dataset, data=output_segmentation, compression=\"gzip\") def", "will be padded with pad_width: {dataset.pad_width}') prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')", "and saves the result in the `output_file` in the H5", "# relabel new segmentation with the merged labels for current_label,", "output_heads == 1: predictions = [predictions] # for each output", "file h5_output_file.close() def _allocate_prediction_maps(self, output_shape, output_heads, output_file): # initialize the", "with pad_width: {dataset.pad_width}') prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions') normalization_datasets = self._get_output_dataset_names(output_heads,", "return the segmented volume. 
Args: embeddings (ndarray): 4D (CDHW) embeddings", "chunks=True, compression='gzip') for dataset_name in normalization_datasets] return prediction_maps, normalization_masks def", "__init__(self, model, loader, output_file, config, **kwargs): super().__init__(model, loader, output_file, config,", "in self.loader: # logger.info(f'Predicting embeddings for slice:{index}') # send batch", "fit in into RAM use `LazyPredictor` instead. The output dataset", "MeanShift algorithm patch by patch and then stitching the patches", "= self._get_output_dataset_names(output_heads, prefix=f'segmentation/{self.clustering_name}') for output_segmentation, prediction_dataset in zip(output_segmentations, prediction_datasets): logger.info(f'Saving", "_ in range(output_heads)] # initialize visited_voxels arrays visited_voxels_arrays = [np.zeros(volume_shape,", "self.clustering = clustering assert clustering in ['hdbscan', 'meanshift'], 'Only HDBSCAN", "(same size as `output_segmentation`); visited voxels will be marked by", "return result def _get_clustering(self, clustering_alg, kwargs): logger.info(f'Using {clustering_alg} for clustering')", "from pytorch3dunet.unet3d.utils import unpad logger = get_logger('UNet3DPredictor') class _AbstractPredictor: def", "prefix='predictions') for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks, prediction_datasets): prediction_map", "memory. If the results from the network don't fit in", "the network are kept in memory. If the results from", "pad_width:-pad_width] logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...') output_file.create_dataset(prediction_dataset, data=prediction_map, compression=\"gzip\") class LazyPredictor(StandardPredictor):", "the `output_segmentation` Args: segmentation (ndarray): segmented patch index (tuple): position", "dtype='uint8') for _ in range(output_heads)] # Sets the module in", "the model on the given dataset and saves the result", "a given threshold for new_label in new_labels: # skip 'noise'", "kept in memory. If the results from the network don't", "the current patch overlap_mask = visited_voxels_array[index] > 0 # get", "Duration: {time.time() - start} sec.') return clusters def _merge_segmentation(self, segmentation,", "H5 file h5_output_file = h5py.File(self.output_file, 'w') # allocate prediction and", "+= 1 def _merge_labels(self, current_segmentation, new_labels, new_segmentation): def _most_frequent_label(labels): unique,", "patch overlap_mask = visited_voxels_array[index] > 0 # get the new", "loaded with mirror padding, pad_width: {pad_width}. Cropping before saving...') prediction_map", "**kwargs) self.iou_threshold = iou_threshold self.noise_label = noise_label self.clustering = clustering", "output_heads, output_file, dataset): if dataset.mirror_padding: logger.warn( f'Mirror padding unsupported in", "unique[ind] result = [] # iterate over new_labels and merge", "bandwidth = kwargs['bandwidth'] logger.info(f'MeanShift params: bandwidth: {bandwidth}, bin_seeding: True') #", "[np.zeros(volume_shape, dtype='int32') for _ in range(output_heads)] # initialize visited_voxels arrays", "self._get_output_dataset_names(output_heads, prefix='predictions') normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization') # normalize the prediction_maps", "together. 
\"\"\" def __init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7,", "D, H, W) -> (C, D * H * W)", "= loader self.output_file = output_file self.config = config self.predictor_config =", "return [f'{prefix}{i}' for i in range(number_of_datasets)] def predict(self): raise NotImplementedError", "explicitly (necessary for batchnorm/dropout layers if present) self.model.eval() # Set", "only channel '{prediction_channel}' from the network output\") device = self.config['device']", "zip(embeddings, output_segmentations, visited_voxels_arrays): # convert to numpy array prediction =", "embeddings with HDBSCAN or MeanShift algorithm patch by patch and", "prediction_datasets] # allocate datasets for normalization masks normalization_datasets = self._get_output_dataset_names(output_heads,", "self.config['model'].get('output_heads', 1) logger.info(f'Running prediction on {len(self.loader)} patches...') # dimensionality of", "None) if prediction_channel is not None: logger.info(f\"Using only channel '{prediction_channel}'", "output_heads == 1: embeddings = [embeddings] for prediction, output_segmentation, visited_voxels_array", "`LazyPredictor` instead. The output dataset names inside the H5 is", "results to self._save_results(prediction_maps, normalization_masks, output_heads, h5_output_file, self.loader.dataset) # close the", "W) and transpose -> (D * H * W, C)", "compression='gzip') for dataset_name in prediction_datasets] # allocate datasets for normalization", "* H * W) and transpose -> (D * H", "update the output_segmentation output_segmentation[index] = segmentation # visit the patch", "clustering assert clustering in ['hdbscan', 'meanshift'], 'Only HDBSCAN and MeanShift", "visits for normalization normalization_mask[u_index] += 1 else: # accumulate probabilities", "(DHW) segmentation \"\"\" # shape of the output segmentation output_shape", "`testing=true` flag otherwise the final Softmax/Sigmoid won't be applied! self.model.testing", "if clustering_alg == 'hdbscan': min_cluster_size = kwargs.get('min_cluster_size', 50) min_samples =", "1 logger.info(f'Deleting {normalization_dataset}...') del output_file[normalization_dataset] class EmbeddingsPredictor(_AbstractPredictor): \"\"\" Applies the", "channel_slice = slice(0, out_channels) else: channel_slice = slice(0, 1) index", "sklearn.cluster import MeanShift from pytorch3dunet.datasets.hdf5 import SliceBuilder from pytorch3dunet.unet3d.utils import", "volume does not fit into RAM. The output dataset names", "patch and the output_segmentation # but keep the noise label", "min_samples=min_samples, metric=metric, cluster_selection_method=cluster_selection_method) else: bandwidth = kwargs['bandwidth'] logger.info(f'MeanShift params: bandwidth:", "prefix='predictions'): if number_of_datasets == 1: return [prefix] else: return [f'{prefix}{i}'", "with mirror padding, pad_width: {pad_width}. Cropping before saving...') prediction_map =", "the output segmentation output_shape = embeddings.shape[1:] # reshape (C, D,", "unassigned label max_label = np.max(output_segmentation) + 1 # make sure", "result in the `output_file` in the H5 format. Predicted patches", "the `output_file` in the H5 format. 
Predictions from the network", "from the network if output_heads == 1: embeddings = [embeddings]", "u_prediction # count voxel visits for normalization normalization_mask[u_index] += 1", "-> (C, D * H * W) and transpose ->", "np.argmax(counts) return unique[ind] result = [] # iterate over new_labels", "prediction_maps_shape = (out_channels,) + volume_shape else: # single channel prediction", "min_cluster_size = kwargs.get('min_cluster_size', 50) min_samples = kwargs.get('min_samples', None), metric =", "by `des_dataset_name` config argument. If the argument is not present", "position of the patch inside `output_segmentation` volume output_segmentation (ndarray): current", "compression=\"gzip\") def _embeddings_to_segmentation(self, embeddings): \"\"\" Cluster embeddings vectors with HDBSCAN", "prediction maps (CDHW): {prediction_maps_shape}') avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts', True) logger.info(f'Avoid block", "`output_segmentation` Args: segmentation (ndarray): segmented patch index (tuple): position of", "the given dataset and saves the result in the `output_file`", "if raw.ndim == 3: return raw.shape else: return raw.shape[1:] @staticmethod", "dataset.pad_width logger.info(f'Dataset loaded with mirror padding, pad_width: {pad_width}. Cropping before", "fit into RAM. The output dataset names inside the H5", "device batch = batch.to(device) # forward pass embeddings = self.model(batch)", "+= 1 # save results to self._save_results(prediction_maps, normalization_masks, output_heads, h5_output_file,", "in the H5 format. Predictions from the network are kept", "iou = np.bitwise_and(new_label_mask, current_label_mask).sum() / np.bitwise_or(new_label_mask, current_label_mask).sum() if iou >", "(ndarray): array of voxels visited so far (same size as", "* H * W, C) flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose() logger.info('Clustering", "params: min_cluster_size: {min_cluster_size}, min_samples: {min_samples}') return hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric, cluster_selection_method=cluster_selection_method)", "self.config['model'].get('out_channels') if out_channels is None: out_channels = self.config['model']['dt_out_channels'] prediction_channel =", "each batch sample for pred, index in zip(prediction, indices): #", "{prediction_maps_shape}') avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts', True) logger.info(f'Avoid block artifacts: {avoid_block_artifacts}') #", "labels inside the overlap_mask new_labels = np.unique(segmentation[overlap_mask]) merged_labels = self._merge_labels(output_segmentation[index],", "skip 'noise' label assigned by hdbscan if new_label == self.noise_label:", "patch by patch and then stitching the patches together. \"\"\"", "used as a default dataset name, where `n` denotes the", "labels for current_label, new_label in merged_labels: segmentation[segmentation == new_label] =", "= prediction.cpu().numpy() # iterate sequentially because of the current simple", "volume patch_shape = (z // 3, y // 3, x", "None), metric = kwargs.get('metric', 'euclidean') cluster_selection_method = kwargs.get('cluster_selection_method', 'eom') logger.info(f'HDBSCAN", "shape of the output segmentation (DHW): {volume_shape}') logger.info('Allocating segmentation array...')", "number of the output head from the network. 
import time

import h5py
import hdbscan
import numpy as np
import torch
from sklearn.cluster import MeanShift

from pytorch3dunet.datasets.hdf5 import SliceBuilder
from pytorch3dunet.unet3d.utils import get_logger
from pytorch3dunet.unet3d.utils import unpad

logger = get_logger('UNet3DPredictor')


class _AbstractPredictor:
    def __init__(self, model, loader, output_file, config, **kwargs):
        self.model = model
        self.loader = loader
        self.output_file = output_file
        self.config = config
        self.predictor_config = kwargs

    @staticmethod
    def _volume_shape(dataset):
        # TODO: support multiple internal datasets
        raw = dataset.raws[0]
        if raw.ndim == 3:
            return raw.shape
        else:
            return raw.shape[1:]

    @staticmethod
    def _get_output_dataset_names(number_of_datasets, prefix='predictions'):
        if number_of_datasets == 1:
            return [prefix]
        else:
            return [f'{prefix}{i}' for i in range(number_of_datasets)]

    def predict(self):
        raise NotImplementedError


class StandardPredictor(_AbstractPredictor):
    """
    Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
    Predictions from the network are kept in memory. If the results from the network don't fit into RAM,
    use `LazyPredictor` instead.

    The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument
    is not present in the config, 'predictions{n}' is used as a default dataset name, where `n` denotes the number
    of the output head from the network.

    Args:
        model (Unet3D): trained 3D UNet model used for prediction
        data_loader (torch.utils.data.DataLoader): input data loader
        output_file (str): path to the output H5 file
        config (dict): global config dict
    """

    def __init__(self, model, loader, output_file, config, **kwargs):
        super().__init__(model, loader, output_file, config, **kwargs)

    def predict(self):
        out_channels = self.config['model'].get('out_channels')
        if out_channels is None:
            out_channels = self.config['model']['dt_out_channels']

        prediction_channel = self.config.get('prediction_channel', None)
        if prediction_channel is not None:
            logger.info(f"Using only channel '{prediction_channel}' from the network output")

        device = self.config['device']
        output_heads = self.config['model'].get('output_heads', 1)

        logger.info(f'Running prediction on {len(self.loader)} batches...')

        # dimensionality of the output predictions
        volume_shape = self._volume_shape(self.loader.dataset)
        if prediction_channel is None:
            prediction_maps_shape = (out_channels,) + volume_shape
        else:
            # single channel prediction map
            prediction_maps_shape = (1,) + volume_shape

        logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')

        avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts', True)
        logger.info(f'Avoid block artifacts: {avoid_block_artifacts}')

        # create destination H5 file
        h5_output_file = h5py.File(self.output_file, 'w')
        # allocate prediction and normalization arrays
        logger.info('Allocating prediction and normalization arrays...')
        prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape,
                                                                               output_heads, h5_output_file)

        # Sets the module in evaluation mode explicitly (necessary for batchnorm/dropout layers if present)
        self.model.eval()
        # Set the `testing=true` flag otherwise the final Softmax/Sigmoid won't be applied!
        self.model.testing = True
        # Run predictions on the entire input dataset
        with torch.no_grad():
            for batch, indices in self.loader:
                # send batch to device
                batch = batch.to(device)
                # forward pass
                predictions = self.model(batch)

                # wrap predictions into a list if there is only one output head from the network
                if output_heads == 1:
                    predictions = [predictions]

                # for each output head
                for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,
                                                                          normalization_masks):
                    # convert to numpy array
                    prediction = prediction.cpu().numpy()

                    # for each batch sample
                    for pred, index in zip(prediction, indices):
                        # save patch index: (C,D,H,W)
                        if prediction_channel is None:
                            channel_slice = slice(0, out_channels)
                        else:
                            channel_slice = slice(0, 1)

                        index = (channel_slice,) + index

                        if prediction_channel is not None:
                            # use only the 'prediction_channel'
                            logger.info(f"Using channel '{prediction_channel}'...")
                            pred = np.expand_dims(pred[prediction_channel], axis=0)

                        logger.info(f'Saving predictions for slice:{index}...')

                        if avoid_block_artifacts:
                            # unpad in order to avoid block artifacts in the output probability maps
                            u_prediction, u_index = unpad(pred, index, volume_shape)
                            # accumulate probabilities into the output prediction array
                            prediction_map[u_index] += u_prediction
                            # count voxel visits for normalization
                            normalization_mask[u_index] += 1
                        else:
                            # accumulate probabilities into the output prediction array
                            prediction_map[index] += pred
                            # count voxel visits for normalization
                            normalization_mask[index] += 1

        # save results
        self._save_results(prediction_maps, normalization_masks, output_heads, h5_output_file, self.loader.dataset)
        # close the output H5 file
        h5_output_file.close()

    def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
        # initialize the output prediction arrays
        prediction_maps = [np.zeros(output_shape, dtype='float32') for _ in range(output_heads)]
        # initialize normalization mask in order to average out probabilities of overlapping patches
        normalization_masks = [np.zeros(output_shape, dtype='uint8') for _ in range(output_heads)]
        return prediction_maps, normalization_masks

    def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
        # save probability maps
        prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
        for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks,
                                                                          prediction_datasets):
            prediction_map = prediction_map / normalization_mask

            if dataset.mirror_padding:
                pad_width = dataset.pad_width
                logger.info(f'Dataset loaded with mirror padding, pad_width: {pad_width}. Cropping before saving...')
                prediction_map = prediction_map[:, pad_width:-pad_width, pad_width:-pad_width, pad_width:-pad_width]

            logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
            output_file.create_dataset(prediction_dataset, data=prediction_map, compression="gzip")


class LazyPredictor(StandardPredictor):
    """
    Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
    Predicted patches are directly saved into the H5 and they won't be stored in memory. Since this predictor
    is slower than the `StandardPredictor` it should only be used when the predicted volume does not fit into RAM.

    The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument
    is not present in the config, 'predictions{n}' is used as a default dataset name, where `n` denotes the number
    of the output head from the network.

    Args:
        model (Unet3D): trained 3D UNet model used for prediction
        data_loader (torch.utils.data.DataLoader): input data loader
        output_file (str): path to the output H5 file
        config (dict): global config dict
    """

    def __init__(self, model, loader, output_file, config, **kwargs):
        super().__init__(model, loader, output_file, config, **kwargs)

    def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
        # allocate datasets for probability maps
        prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
        prediction_maps = [
            output_file.create_dataset(dataset_name, shape=output_shape, dtype='float32', chunks=True,
                                       compression='gzip')
            for dataset_name in prediction_datasets]

        # allocate datasets for normalization masks
        normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')
        normalization_masks = [
            output_file.create_dataset(dataset_name, shape=output_shape, dtype='uint8', chunks=True,
                                       compression='gzip')
            for dataset_name in normalization_datasets]

        return prediction_maps, normalization_masks

    def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
        if dataset.mirror_padding:
            logger.warn(
                f'Mirror padding unsupported in LazyPredictor. Output predictions will be padded with pad_width: {dataset.pad_width}')

        prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
        normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')

        # normalize the prediction_maps inside the H5
        for prediction_map, normalization_mask, prediction_dataset, normalization_dataset in zip(
                prediction_maps, normalization_masks, prediction_datasets, normalization_datasets):
            # split the volume into 4 parts and load each into the memory separately
            logger.info(f'Normalizing {prediction_dataset}...')

            z, y, x = prediction_map.shape[1:]
            # take slices which are 1/27 of the original volume
            patch_shape = (z // 3, y // 3, x // 3)
            for index in SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape):
                logger.info(f'Normalizing slice: {index}')
                prediction_map[index] /= normalization_mask[index]
                # make sure to reset the slice that has been visited already in order to avoid 'double' normalization
                # when the patches overlap with each other
                normalization_mask[index] = 1

            logger.info(f'Deleting {normalization_dataset}...')
            del output_file[normalization_dataset]


class EmbeddingsPredictor(_AbstractPredictor):
    """
    Applies the embedding model on the given dataset and saves the result in the `output_file` in the H5 format.

    The resulting volume is the segmentation itself (not the embedding vectors) obtained by clustering embeddings
    with HDBSCAN or MeanShift algorithm patch by patch and then stitching the patches together.
    """

    def __init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7, noise_label=-1, **kwargs):
        super().__init__(model, loader, output_file, config, **kwargs)

        self.iou_threshold = iou_threshold
        self.noise_label = noise_label

        assert clustering in ['hdbscan', 'meanshift'], 'Only HDBSCAN and MeanShift are supported'
        logger.info(f'IoU threshold: {iou_threshold}')

        self.clustering_name = clustering
        self.clustering = self._get_clustering(clustering, kwargs)

    def predict(self):
        device = self.config['device']
        output_heads = self.config['model'].get('output_heads', 1)

        logger.info(f'Running prediction on {len(self.loader)} patches...')

        # dimensionality of the output segmentation
        volume_shape = self._volume_shape(self.loader.dataset)
        logger.info(f'The shape of the output segmentation (DHW): {volume_shape}')

        logger.info('Allocating segmentation array...')
        # initialize the output prediction arrays
        output_segmentations = [np.zeros(volume_shape, dtype='int32') for _ in range(output_heads)]
        # initialize visited_voxels arrays
        visited_voxels_arrays = [np.zeros(volume_shape, dtype='uint8') for _ in range(output_heads)]

        # Sets the module in evaluation mode explicitly
        self.model.eval()
        self.model.testing = True
        # Run predictions on the entire input dataset
        with torch.no_grad():
            for batch, indices in self.loader:
                # send batch to device
                batch = batch.to(device)
                # forward pass
                embeddings = self.model(batch)

                # wrap predictions into a list if there is only one output head from the network
                if output_heads == 1:
                    embeddings = [embeddings]

                for prediction, output_segmentation, visited_voxels_array in zip(embeddings, output_segmentations,
                                                                                 visited_voxels_arrays):
                    # convert to numpy array
                    prediction = prediction.cpu().numpy()

                    # iterate sequentially because of the current simple stitching that we're using
                    for pred, index in zip(prediction, indices):
                        # convert embeddings to segmentation with hdbscan clustering
                        segmentation = self._embeddings_to_segmentation(pred)
                        # stitch patches
                        self._merge_segmentation(segmentation, index, output_segmentation, visited_voxels_array)

        # save results
        with h5py.File(self.output_file, 'w') as output_file:
            prediction_datasets = self._get_output_dataset_names(output_heads,
                                                                 prefix=f'segmentation/{self.clustering_name}')
            for output_segmentation, prediction_dataset in zip(output_segmentations, prediction_datasets):
                logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
                output_file.create_dataset(prediction_dataset, data=output_segmentation, compression="gzip")

    def _embeddings_to_segmentation(self, embeddings):
        """
        Cluster embeddings vectors with HDBSCAN and return the segmented volume.

        Args:
            embeddings (ndarray): 4D (CDHW) embeddings tensor
        Returns:
            3D (DHW) segmentation
        """
        # shape of the output segmentation
        output_shape = embeddings.shape[1:]
        # reshape (C, D, H, W) -> (C, D * H * W) and transpose -> (D * H * W, C)
        flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose()

        logger.info('Clustering embeddings...')
        # perform clustering and reshape in order to get the segmentation volume
        start = time.time()
        clusters = self.clustering.fit_predict(flattened_embeddings).reshape(output_shape)
        logger.info(f'Number of clusters found by {self.clustering}: {np.max(clusters)}. '
                    f'Duration: {time.time() - start} sec.')
        return clusters

    def _merge_segmentation(self, segmentation, index, output_segmentation, visited_voxels_array):
        """
        Given the `segmentation` patch, its `index` in the `output_segmentation` array and the array of visited
        voxels, merge the segmented patch (`segmentation`) into the `output_segmentation`.

        Args:
            segmentation (ndarray): segmented patch
            index (tuple): position of the patch inside `output_segmentation` volume
            output_segmentation (ndarray): current state of the output segmentation
            visited_voxels_array (ndarray): array of voxels visited so far (same size as `output_segmentation`);
                visited voxels will be marked by a number greater than 0
        """
        index = tuple(index)
        # get new unassigned label
        max_label = np.max(output_segmentation) + 1
        # make sure there are no clashes between the current segmentation patch and the output_segmentation,
        # but keep the noise label
        noise_mask = segmentation == self.noise_label
        segmentation += int(max_label)
        segmentation[noise_mask] = self.noise_label
        # get the overlap mask in the current patch
        overlap_mask = visited_voxels_array[index] > 0
        # get the new labels inside the overlap_mask
        new_labels = np.unique(segmentation[overlap_mask])

        merged_labels = self._merge_labels(output_segmentation[index], new_labels, segmentation)
        # relabel new segmentation with the merged labels
        for current_label, new_label in merged_labels:
            segmentation[segmentation == new_label] = current_label

        # update the output_segmentation
        output_segmentation[index] = segmentation
        # visit the patch
        visited_voxels_array[index] += 1

    def _merge_labels(self, current_segmentation, new_labels, new_segmentation):
        def _most_frequent_label(labels):
            unique, counts = np.unique(labels, return_counts=True)
            ind = np.argmax(counts)
            return unique[ind]

        result = []
        # iterate over new_labels and merge regions if the IoU exceeds a given threshold
        for new_label in new_labels:
            # skip 'noise' label assigned by hdbscan
            if new_label == self.noise_label:
                continue
            new_label_mask = new_segmentation == new_label
            # get only the most frequent overlapping label
            most_frequent_label = _most_frequent_label(current_segmentation[new_label_mask])
            # skip 'noise' label
            if most_frequent_label == self.noise_label:
                continue
            current_label_mask = current_segmentation == most_frequent_label
            # compute Jaccard index
            iou = np.bitwise_and(new_label_mask, current_label_mask).sum() / np.bitwise_or(new_label_mask,
                                                                                           current_label_mask).sum()
            if iou > self.iou_threshold:
                # merge labels
                result.append((most_frequent_label, new_label))

        return result

    def _get_clustering(self, clustering_alg, kwargs):
        logger.info(f'Using {clustering_alg} for clustering')

        if clustering_alg == 'hdbscan':
            min_cluster_size = kwargs.get('min_cluster_size', 50)
            min_samples = kwargs.get('min_samples', None)
            metric = kwargs.get('metric', 'euclidean')
            cluster_selection_method = kwargs.get('cluster_selection_method', 'eom')

            logger.info(f'HDBSCAN params: min_cluster_size: {min_cluster_size}, min_samples: {min_samples}')
            return hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric,
                                   cluster_selection_method=cluster_selection_method)
        else:
            bandwidth = kwargs['bandwidth']
            logger.info(f'MeanShift params: bandwidth: {bandwidth}, bin_seeding: True')
            # use fast MeanShift with bin seeding
            return MeanShift(bandwidth=bandwidth, bin_seeding=True)
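For orientation, a minimal usage sketch follows; it is not part of the module above. It assumes a trained network object (`model`) that exposes the `testing` flag, an HDF5-backed test loader (`test_loader`), and a config dict carrying the 'device' and 'model' keys that the predictors read; the import path, variable names, and output paths are illustrative assumptions.

# Hedged usage sketch; `model` and `test_loader` are assumed to exist already.
from pytorch3dunet.unet3d.predictor import StandardPredictor, EmbeddingsPredictor  # assumed module path

config = {
    'device': 'cuda:0',
    'model': {'out_channels': 2, 'output_heads': 1},
}

# Keep probability maps in RAM and write them to HDF5 at the end.
predictor = StandardPredictor(model, test_loader, 'predictions.h5', config)
predictor.predict()

# Cluster per-voxel embeddings into an instance segmentation with HDBSCAN,
# merging overlapping patch labels when their IoU exceeds the threshold.
emb_predictor = EmbeddingsPredictor(model, test_loader, 'segmentation.h5', config,
                                    clustering='hdbscan', iou_threshold=0.7,
                                    min_cluster_size=50)
emb_predictor.predict()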
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import os.path as osp


class VisionaryDevTools(Package):
    """Developer convenience packages common to all visionary development meta packages.
    Application specific build tools belong to the dedicated meta packages."""

    homepage = ''
    # some random tarball, to make `spack fetch --dependencies visionary-defaults` work
    url = 'https://github.com/electronicvisions/spack/archive/v0.8.tar.gz'

    # This is only a dummy tarball (see difference between version numbers)
    # TODO: as soon as a MetaPackage-concept has been merged, please update this package
    version('1.0', '372ce038842f20bf0ae02de50c26e85d', url='https://github.com/electronicvisions/spack/archive/v0.8.tar.gz')

    depends_on('ack')
    depends_on('bear')
    depends_on('cairo +X')
    depends_on('cloc')
    depends_on('cmake')
    depends_on('connect-proxy')
    depends_on('cppcheck +htmlreport')
    depends_on('cquery')
    depends_on('doxygen+graphviz')
    depends_on('emacs ~X')
    depends_on('gdb')
    depends_on('genpybind')
    depends_on('git+tcltk')
    depends_on('git-fat-git')
    depends_on('gtkplus')
    depends_on('imagemagick')
    depends_on('jq')
    depends_on('libpcap')
    depends_on('libtool')
    depends_on('llvm+visionary+python~libcxx build_type=Release')
    depends_on('mercurial')
    depends_on('mosh')
    depends_on('munge')
    depends_on('ncdu')
    depends_on('node-js')
    depends_on('octave+fftw')
    depends_on('openssh')
    depends_on('pigz')
    depends_on('pkg-config')
    depends_on('py-autopep8')
    depends_on('py-black', when="^python@3.6.0:")
    depends_on('py-configargparse')
    depends_on('py-doxypypy')
    depends_on('py-flake8')
    depends_on('py-gdbgui')
    depends_on('py-git-review')
    depends_on('py-ipython')
    depends_on('py-jedi')
    depends_on('py-junit-xml')
    depends_on('py-language-server')
    depends_on('py-line-profiler')
    depends_on('py-nose')
    depends_on('py-nose2')
    depends_on('py-memory-profiler')
    depends_on('py-pudb')
    depends_on('py-pylint@:1.999.999', when="^python@:2.999.999")
    depends_on('py-pylint', when="^python@3.4.0:")
    depends_on('py-pyserial')
    depends_on('py-pytest')
    depends_on('py-pytest-xdist')
    depends_on('py-ranger-fm')
    depends_on('py-sqlalchemy')
    depends_on('py-virtualenv')
    depends_on('py-xmlrunner')
    depends_on('py-yq')
    depends_on('rtags')
    depends_on('tar')
    depends_on('texinfo')
    # ECM (2020-05-14): removed 'the-silver-searcher' due to build fail on gcc@10.1.0
    depends_on('tig')
    depends_on('time')
    depends_on('tmux')
    depends_on('units')
    depends_on('valgrind')
    depends_on('verilator')
    depends_on('vim +python +ruby +perl +cscope +huge +x')
    depends_on('visionary-xilinx')
    depends_on('wget')
    depends_on('yaml-cpp+shared')
    depends_on('zsh')

    def install(self, spec, prefix):
        mkdirp(prefix.etc)
        # store a copy of this package
        filename = osp.basename(osp.dirname(__file__))  # gives name of parent folder
        install(__file__, join_path(prefix.etc, filename + '.py'))
        # we could create some filesystem
Application specific build tools belong", "url = 'https://github.com/electronicvisions/spack/archive/v0.8.tar.gz' # This is only a dummy tarball", "depends_on('imagemagick') depends_on('jq') depends_on('libpcap') depends_on('libtool') depends_on('llvm+visionary+python~libcxx build_type=Release') depends_on('mercurial') depends_on('mosh') depends_on('munge') depends_on('ncdu')", "package version('1.0', '372ce038842f20bf0ae02de50c26e85d', url='https://github.com/electronicvisions/spack/archive/v0.8.tar.gz') depends_on('ack') depends_on('autoconf') depends_on('automake') depends_on('bash-completion') depends_on('bazel') depends_on('bear')", "depends_on('jq') depends_on('libpcap') depends_on('libtool') depends_on('llvm+visionary+python~libcxx build_type=Release') depends_on('mercurial') depends_on('mosh') depends_on('munge') depends_on('ncdu') depends_on('node-js')", "'372ce038842f20bf0ae02de50c26e85d', url='https://github.com/electronicvisions/spack/archive/v0.8.tar.gz') depends_on('ack') depends_on('autoconf') depends_on('automake') depends_on('bash-completion') depends_on('bazel') depends_on('bear') depends_on('cairo +X')", "store a copy of this package. filename = osp.basename(osp.dirname(__file__)) #", "depends_on('bash-completion') depends_on('bazel') depends_on('bear') depends_on('cairo +X') depends_on('cloc') depends_on('cmake') depends_on('connect-proxy') depends_on('cppcheck +htmlreport')", "Livermore National Security, LLC and other # Spack Project Developers.", "update this package version('1.0', '372ce038842f20bf0ae02de50c26e85d', url='https://github.com/electronicvisions/spack/archive/v0.8.tar.gz') depends_on('ack') depends_on('autoconf') depends_on('automake') depends_on('bash-completion')", "filename + '.py')) # we could create some filesystem view", "VisionaryDevTools(Package): \"\"\"Developer convenience packages common to all visionary development meta", "depends_on('py-pytest-xdist') depends_on('py-ranger-fm') depends_on('py-sqlalchemy') depends_on('py-virtualenv') depends_on('py-xmlrunner') depends_on('py-yq') depends_on('rtags') depends_on('tar') depends_on('texinfo') #", "when=\"^python@:2.999.999\") depends_on('py-pylint', when=\"^python@3.4.0:\") depends_on('py-pyserial') depends_on('py-pytest') depends_on('py-pytest-xdist') depends_on('py-ranger-fm') depends_on('py-sqlalchemy') depends_on('py-virtualenv') depends_on('py-xmlrunner')", "+huge +x') depends_on('visionary-xilinx') depends_on('wget') depends_on('yaml-cpp+shared') depends_on('zsh') def install(self, spec, prefix):", "+ '.py')) # we could create some filesystem view here?", "when=\"^python@3.4.0:\") depends_on('py-pyserial') depends_on('py-pytest') depends_on('py-pytest-xdist') depends_on('py-ranger-fm') depends_on('py-sqlalchemy') depends_on('py-virtualenv') depends_on('py-xmlrunner') depends_on('py-yq') depends_on('rtags')", "Application specific build tools belong to the dedicated meta packages.\"\"\"", "depends_on('py-black', when=\"^python@3.6.0:\") depends_on('py-configargparse') depends_on('py-doxypypy') depends_on('py-flake8') depends_on('py-gdbgui') depends_on('py-git-review') depends_on('py-ipython') depends_on('py-jedi') depends_on('py-junit-xml')", "specific build tools belong to the dedicated meta packages.\"\"\" homepage", "depends_on('tmux') depends_on('units') depends_on('valgrind') depends_on('verilator') depends_on('vim +python +ruby +perl +cscope +huge", "depends_on('visionary-xilinx') depends_on('wget') depends_on('yaml-cpp+shared') depends_on('zsh') def install(self, spec, prefix): 
mkdirp(prefix.etc) #", "only a dummy tarball (see difference between version numbers) #", "fetch --dependencies visionary-defaults` work url = 'https://github.com/electronicvisions/spack/archive/v0.8.tar.gz' # This is", "some random tarball, to make `spack fetch --dependencies visionary-defaults` work", "LLC and other # Spack Project Developers. See the top-level", "depends_on('py-jedi') depends_on('py-junit-xml') depends_on('py-language-server') depends_on('py-line-profiler') depends_on('py-nose') depends_on('py-nose2') depends_on('py-memory-profiler') depends_on('py-pudb') depends_on('py-pylint@:1.999.999', when=\"^python@:2.999.999\")", "on gcc@10.1.0 depends_on('tig') depends_on('time') depends_on('tmux') depends_on('units') depends_on('valgrind') depends_on('verilator') depends_on('vim +python", "# gives name of parent folder install(__file__, join_path(prefix.etc, filename +", "depends_on('py-line-profiler') depends_on('py-nose') depends_on('py-nose2') depends_on('py-memory-profiler') depends_on('py-pudb') depends_on('py-pylint@:1.999.999', when=\"^python@:2.999.999\") depends_on('py-pylint', when=\"^python@3.4.0:\") depends_on('py-pyserial')", "def install(self, spec, prefix): mkdirp(prefix.etc) # store a copy of", "common to all visionary development meta packages. Application specific build", "to the dedicated meta packages.\"\"\" homepage = '' # some", "packages.\"\"\" homepage = '' # some random tarball, to make", "+x') depends_on('visionary-xilinx') depends_on('wget') depends_on('yaml-cpp+shared') depends_on('zsh') def install(self, spec, prefix): mkdirp(prefix.etc)", "tarball (see difference between version numbers) # TODO: as soon", "depends_on('tig') depends_on('time') depends_on('tmux') depends_on('units') depends_on('valgrind') depends_on('verilator') depends_on('vim +python +ruby +perl", "os.path as osp class VisionaryDevTools(Package): \"\"\"Developer convenience packages common to", "(see difference between version numbers) # TODO: as soon as", "depends_on('automake') depends_on('bash-completion') depends_on('bazel') depends_on('bear') depends_on('cairo +X') depends_on('cloc') depends_on('cmake') depends_on('connect-proxy') depends_on('cppcheck", "random tarball, to make `spack fetch --dependencies visionary-defaults` work url", "make `spack fetch --dependencies visionary-defaults` work url = 'https://github.com/electronicvisions/spack/archive/v0.8.tar.gz' #", "soon as a MetaPackage-concept has been merged, please update this", "as a MetaPackage-concept has been merged, please update this package", "please update this package version('1.0', '372ce038842f20bf0ae02de50c26e85d', url='https://github.com/electronicvisions/spack/archive/v0.8.tar.gz') depends_on('ack') depends_on('autoconf') depends_on('automake')", "depends_on('bazel') depends_on('bear') depends_on('cairo +X') depends_on('cloc') depends_on('cmake') depends_on('connect-proxy') depends_on('cppcheck +htmlreport') depends_on('cquery')", "visionary-defaults` work url = 'https://github.com/electronicvisions/spack/archive/v0.8.tar.gz' # This is only a", "(2020-05-14): removed 'the-silver-searcher' due to build fail on gcc@10.1.0 depends_on('tig')", "depends_on('py-yq') depends_on('rtags') depends_on('tar') depends_on('texinfo') # ECM (2020-05-14): removed 'the-silver-searcher' due", "depends_on('py-sqlalchemy') depends_on('py-virtualenv') depends_on('py-xmlrunner') depends_on('py-yq') depends_on('rtags') depends_on('tar') depends_on('texinfo') # ECM (2020-05-14):", "meta packages. 
Application specific build tools belong to the dedicated", "--dependencies visionary-defaults` work url = 'https://github.com/electronicvisions/spack/archive/v0.8.tar.gz' # This is only", "depends_on('verilator') depends_on('vim +python +ruby +perl +cscope +huge +x') depends_on('visionary-xilinx') depends_on('wget')", "Developers. See the top-level COPYRIGHT file for details. # #", "copy of this package. filename = osp.basename(osp.dirname(__file__)) # gives name", "depends_on('libpcap') depends_on('libtool') depends_on('llvm+visionary+python~libcxx build_type=Release') depends_on('mercurial') depends_on('mosh') depends_on('munge') depends_on('ncdu') depends_on('node-js') depends_on('octave+fftw')", "+perl +cscope +huge +x') depends_on('visionary-xilinx') depends_on('wget') depends_on('yaml-cpp+shared') depends_on('zsh') def install(self,", "url='https://github.com/electronicvisions/spack/archive/v0.8.tar.gz') depends_on('ack') depends_on('autoconf') depends_on('automake') depends_on('bash-completion') depends_on('bazel') depends_on('bear') depends_on('cairo +X') depends_on('cloc')", "packages common to all visionary development meta packages. Application specific", "install(__file__, join_path(prefix.etc, filename + '.py')) # we could create some", "depends_on('gtkplus') depends_on('imagemagick') depends_on('jq') depends_on('libpcap') depends_on('libtool') depends_on('llvm+visionary+python~libcxx build_type=Release') depends_on('mercurial') depends_on('mosh') depends_on('munge')", "'the-silver-searcher' due to build fail on gcc@10.1.0 depends_on('tig') depends_on('time') depends_on('tmux')", "of parent folder install(__file__, join_path(prefix.etc, filename + '.py')) # we", "depends_on('vim +python +ruby +perl +cscope +huge +x') depends_on('visionary-xilinx') depends_on('wget') depends_on('yaml-cpp+shared')", "depends_on('pkg-config') depends_on('py-autopep8') depends_on('py-black', when=\"^python@3.6.0:\") depends_on('py-configargparse') depends_on('py-doxypypy') depends_on('py-flake8') depends_on('py-gdbgui') depends_on('py-git-review') depends_on('py-ipython')", "depends_on('autoconf') depends_on('automake') depends_on('bash-completion') depends_on('bazel') depends_on('bear') depends_on('cairo +X') depends_on('cloc') depends_on('cmake') depends_on('connect-proxy')", "# some random tarball, to make `spack fetch --dependencies visionary-defaults`", "is only a dummy tarball (see difference between version numbers)", "details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) import os.path as", "build_type=Release') depends_on('mercurial') depends_on('mosh') depends_on('munge') depends_on('ncdu') depends_on('node-js') depends_on('octave+fftw') depends_on('openssh') depends_on('pigz') depends_on('pkg-config')", "MIT) import os.path as osp class VisionaryDevTools(Package): \"\"\"Developer convenience packages", "tools belong to the dedicated meta packages.\"\"\" homepage = ''", "depends_on('py-pytest') depends_on('py-pytest-xdist') depends_on('py-ranger-fm') depends_on('py-sqlalchemy') depends_on('py-virtualenv') depends_on('py-xmlrunner') depends_on('py-yq') depends_on('rtags') depends_on('tar') depends_on('texinfo')", "# ECM (2020-05-14): removed 'the-silver-searcher' due to build fail on", "depends_on('connect-proxy') depends_on('cppcheck +htmlreport') depends_on('cquery') depends_on('doxygen+graphviz') depends_on('emacs ~X') depends_on('gdb') depends_on('genpybind') depends_on('git+tcltk')", "depends_on('py-doxypypy') depends_on('py-flake8') depends_on('py-gdbgui') depends_on('py-git-review') depends_on('py-ipython') depends_on('py-jedi') depends_on('py-junit-xml') depends_on('py-language-server') depends_on('py-line-profiler') depends_on('py-nose')", "depends_on('cppcheck +htmlreport') depends_on('cquery') depends_on('doxygen+graphviz') depends_on('emacs ~X') depends_on('gdb') depends_on('genpybind') depends_on('git+tcltk') depends_on('git-fat-git')", "depends_on('wget') depends_on('yaml-cpp+shared') depends_on('zsh') def install(self, spec, prefix): mkdirp(prefix.etc) # store", "depends_on('munge') depends_on('ncdu') depends_on('node-js') depends_on('octave+fftw') depends_on('openssh') depends_on('pigz') depends_on('pkg-config') depends_on('py-autopep8') depends_on('py-black', when=\"^python@3.6.0:\")", "depends_on('py-git-review') depends_on('py-ipython') depends_on('py-jedi') depends_on('py-junit-xml') depends_on('py-language-server') depends_on('py-line-profiler') depends_on('py-nose') depends_on('py-nose2') depends_on('py-memory-profiler') depends_on('py-pudb')", "depends_on('py-pyserial') depends_on('py-pytest') depends_on('py-pytest-xdist') depends_on('py-ranger-fm') depends_on('py-sqlalchemy') depends_on('py-virtualenv') depends_on('py-xmlrunner') depends_on('py-yq') depends_on('rtags') depends_on('tar')", "spec, prefix): mkdirp(prefix.etc) # store a copy of this package.", "this package version('1.0', '372ce038842f20bf0ae02de50c26e85d', url='https://github.com/electronicvisions/spack/archive/v0.8.tar.gz') depends_on('ack') depends_on('autoconf') depends_on('automake') depends_on('bash-completion') depends_on('bazel')", "meta packages.\"\"\" homepage = '' # some random tarball, to", "osp.basename(osp.dirname(__file__)) # gives name of parent folder install(__file__, join_path(prefix.etc, filename", "name of parent folder install(__file__, join_path(prefix.etc, filename + '.py')) #", "depends_on('pigz') depends_on('pkg-config') depends_on('py-autopep8') depends_on('py-black', when=\"^python@3.6.0:\") depends_on('py-configargparse') depends_on('py-doxypypy') depends_on('py-flake8') depends_on('py-gdbgui') depends_on('py-git-review')", "depends_on('mosh') depends_on('munge') depends_on('ncdu') depends_on('node-js') depends_on('octave+fftw') depends_on('openssh') depends_on('pigz') depends_on('pkg-config') depends_on('py-autopep8') depends_on('py-black',", "depends_on('py-autopep8') depends_on('py-black', when=\"^python@3.6.0:\") depends_on('py-configargparse') depends_on('py-doxypypy') 
depends_on('py-flake8') depends_on('py-gdbgui') depends_on('py-git-review') depends_on('py-ipython') depends_on('py-jedi')", "depends_on('rtags') depends_on('tar') depends_on('texinfo') # ECM (2020-05-14): removed 'the-silver-searcher' due to", "See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier:", "(Apache-2.0 OR MIT) import os.path as osp class VisionaryDevTools(Package): \"\"\"Developer", "has been merged, please update this package version('1.0', '372ce038842f20bf0ae02de50c26e85d', url='https://github.com/electronicvisions/spack/archive/v0.8.tar.gz')", "This is only a dummy tarball (see difference between version", "depends_on('ack') depends_on('autoconf') depends_on('automake') depends_on('bash-completion') depends_on('bazel') depends_on('bear') depends_on('cairo +X') depends_on('cloc') depends_on('cmake')", "depends_on('py-virtualenv') depends_on('py-xmlrunner') depends_on('py-yq') depends_on('rtags') depends_on('tar') depends_on('texinfo') # ECM (2020-05-14): removed", "and other # Spack Project Developers. See the top-level COPYRIGHT", "prefix): mkdirp(prefix.etc) # store a copy of this package. filename", "file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import", "folder install(__file__, join_path(prefix.etc, filename + '.py')) # we could create", "a dummy tarball (see difference between version numbers) # TODO:", "depends_on('cairo +X') depends_on('cloc') depends_on('cmake') depends_on('connect-proxy') depends_on('cppcheck +htmlreport') depends_on('cquery') depends_on('doxygen+graphviz') depends_on('emacs", "+htmlreport') depends_on('cquery') depends_on('doxygen+graphviz') depends_on('emacs ~X') depends_on('gdb') depends_on('genpybind') depends_on('git+tcltk') depends_on('git-fat-git') depends_on('gtkplus')", "merged, please update this package version('1.0', '372ce038842f20bf0ae02de50c26e85d', url='https://github.com/electronicvisions/spack/archive/v0.8.tar.gz') depends_on('ack') depends_on('autoconf')", "filename = osp.basename(osp.dirname(__file__)) # gives name of parent folder install(__file__,", "when=\"^python@3.6.0:\") depends_on('py-configargparse') depends_on('py-doxypypy') depends_on('py-flake8') depends_on('py-gdbgui') depends_on('py-git-review') depends_on('py-ipython') depends_on('py-jedi') depends_on('py-junit-xml') depends_on('py-language-server')", "# This is only a dummy tarball (see difference between", "Project Developers. See the top-level COPYRIGHT file for details. #", "depends_on('genpybind') depends_on('git+tcltk') depends_on('git-fat-git') depends_on('gtkplus') depends_on('imagemagick') depends_on('jq') depends_on('libpcap') depends_on('libtool') depends_on('llvm+visionary+python~libcxx build_type=Release')", "depends_on('ncdu') depends_on('node-js') depends_on('octave+fftw') depends_on('openssh') depends_on('pigz') depends_on('pkg-config') depends_on('py-autopep8') depends_on('py-black', when=\"^python@3.6.0:\") depends_on('py-configargparse')", "depends_on('py-gdbgui') depends_on('py-git-review') depends_on('py-ipython') depends_on('py-jedi') depends_on('py-junit-xml') depends_on('py-language-server') depends_on('py-line-profiler') depends_on('py-nose') depends_on('py-nose2') depends_on('py-memory-profiler')", "Copyright 2013-2019 Lawrence Livermore National Security, LLC and other #", "OR MIT) import os.path as osp class VisionaryDevTools(Package): \"\"\"Developer convenience", "to all visionary development meta packages. 
Application specific build tools", "depends_on('py-pylint@:1.999.999', when=\"^python@:2.999.999\") depends_on('py-pylint', when=\"^python@3.4.0:\") depends_on('py-pyserial') depends_on('py-pytest') depends_on('py-pytest-xdist') depends_on('py-ranger-fm') depends_on('py-sqlalchemy') depends_on('py-virtualenv')", "+cscope +huge +x') depends_on('visionary-xilinx') depends_on('wget') depends_on('yaml-cpp+shared') depends_on('zsh') def install(self, spec,", "= 'https://github.com/electronicvisions/spack/archive/v0.8.tar.gz' # This is only a dummy tarball (see", "other # Spack Project Developers. See the top-level COPYRIGHT file", "= osp.basename(osp.dirname(__file__)) # gives name of parent folder install(__file__, join_path(prefix.etc,", "depends_on('emacs ~X') depends_on('gdb') depends_on('genpybind') depends_on('git+tcltk') depends_on('git-fat-git') depends_on('gtkplus') depends_on('imagemagick') depends_on('jq') depends_on('libpcap')", "TODO: as soon as a MetaPackage-concept has been merged, please", "gcc@10.1.0 depends_on('tig') depends_on('time') depends_on('tmux') depends_on('units') depends_on('valgrind') depends_on('verilator') depends_on('vim +python +ruby", "+python +ruby +perl +cscope +huge +x') depends_on('visionary-xilinx') depends_on('wget') depends_on('yaml-cpp+shared') depends_on('zsh')", "COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT)", "top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR", "version('1.0', '372ce038842f20bf0ae02de50c26e85d', url='https://github.com/electronicvisions/spack/archive/v0.8.tar.gz') depends_on('ack') depends_on('autoconf') depends_on('automake') depends_on('bash-completion') depends_on('bazel') depends_on('bear') depends_on('cairo", "depends_on('py-pudb') depends_on('py-pylint@:1.999.999', when=\"^python@:2.999.999\") depends_on('py-pylint', when=\"^python@3.4.0:\") depends_on('py-pyserial') depends_on('py-pytest') depends_on('py-pytest-xdist') depends_on('py-ranger-fm') depends_on('py-sqlalchemy')" ]
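
# --- Example (not part of the original recipe) -------------------------------
# A minimal sketch of why `osp.basename(osp.dirname(__file__))` in the install
# step above yields the package name: Spack keeps each recipe in a directory
# named after the package, as <package-name>/package.py. The path below is
# hypothetical, purely for illustration.
import os.path as osp

recipe_file = '/repo/packages/visionary-dev-tools/package.py'  # assumed layout
package_name = osp.basename(osp.dirname(recipe_file))
print(package_name)          # -> 'visionary-dev-tools'
print(package_name + '.py')  # the name under which the recipe lands in prefix.etc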
[ "if not line : break; seq = line.split('\\t')[3] real_seq =", "output_file.write(\"\\n\") try: subprocess.check_output(cmd_bedtools, shell = True) except Exception as exc:", "$1, $2, $3, $16, $6}' cmd_paste = \"paste %s %s", "(\"\\t+ File %s already exists\" %pilfer_tmp) else: ## paste Aligned.sortedByCoord.out.bed", "%bed_file) else: cmd_bedtools = \"%s bamtobed -i %s > %s\"", "+ '.pilfer.bed' ## START print (\"\\n+ Converting BAM file into", "# Open file IN fileHandler = open (pilfer_tmp, \"r\") while", "bam_file, sam_file) output_file.write(cmd_samtools) output_file.write(\"\\n\") try: subprocess.check_output(cmd_samtools, shell = True) except", "%exc) exit() ## generate samtools if (os.path.isfile(sam_file)): print (\"\\t+ File", "view %s > %s\" %(samtools_exe, bam_file, sam_file) output_file.write(cmd_samtools) output_file.write(\"\\n\") try:", "folder + '/' + split_name[0] + '.sam' pilfer_tmp = folder", "('***ERROR:') print (cmd_samtools) print('samtools view command generated an exception: %s'", "= \"%s bamtobed -i %s > %s\" %(bedtools_exe, bam_file, bed_file)", "start output_file = open(logFile, 'a') output_file.write(\"\\nConvert BAM to Pilfer Input", "= previous_line counter += 1 else: line_split = previous_line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n'", "next_line counter = 1 break; ## close and finish fileHandler.close()", "= os.path.abspath(argv[1]) folder = argv[2] bedtools_exe = argv[3] samtools_exe =", "PILFER input file\") ## generate bed file with bedtools bamtobed", "# Open file OUT output_file = open(pilfer_file, 'w') # Open", "(sys.argv) < 5: print (\"\\nUsage:\") print (\"python3 %s bam_file folder", "os import re import sys from sys import argv import", "line next_line = fileHandler.readline().strip() if (next_line == line): counter +=", "= True) except Exception as exc: print ('***ERROR:') print (cmd_samtools)", "'/' + split_name[0] + '.bed' sam_file = folder + '/'", "# start output_file = open(logFile, 'a') output_file.write(\"\\nConvert BAM to Pilfer", "%s > %s\" %(samtools_exe, bam_file, sam_file) output_file.write(cmd_samtools) output_file.write(\"\\n\") try: subprocess.check_output(cmd_samtools,", "\"r\") while True: # Get next line from file line", "(\"\\nUsage:\") print (\"python3 %s bam_file folder bedtools_bin samtools_bin logfile\\n\" %os.path.realpath(__file__))", "pilfer_file = folder + '/' + split_name[0] + '.pilfer.bed' ##", "line_split[4])) #counter += 1 while True: #get next line next_line", "paste Aligned.sortedByCoord.out.bed Aligned.sortedByCoord.out.sam | awk -v \"OFS=\\t\" '{print $1, $2,", "bedtools bamtobed -i bam_file if (os.path.isfile(bed_file)): print (\"\\t+ File %s", "open (pilfer_tmp, \"r\") while True: # Get next line from", "samtools if (os.path.isfile(sam_file)): print (\"\\t+ File %s already exists\" %sam_file)", "print (\"\\nUsage:\") print (\"python3 %s bam_file folder bedtools_bin samtools_bin logfile\\n\"", "bedtools_bin samtools_bin logfile\\n\" %os.path.realpath(__file__)) exit() bam_file = os.path.abspath(argv[1]) folder =", "bam_file if (os.path.isfile(bed_file)): print (\"\\t+ File %s already exists\" %bed_file)", "= \"paste %s %s | awk -v \\\"OFS=\\t\\\" \\'{print $1,", "output_file = open(logFile, 'a') output_file.write(\"\\nConvert BAM to Pilfer Input file:\\n\")", "() # Open file OUT output_file = open(pilfer_file, 'w') #", "-i bam_file if (os.path.isfile(bed_file)): print (\"\\t+ File %s already exists\"", "#counter += 1 while True: #get next line next_line =", "parse pilfer tmp file 
counter = 1 previous_line = ()", "%(bed_file, sam_file, pilfer_tmp) output_file.write(cmd_paste) output_file.write(\"\\n\") try: subprocess.check_output(cmd_paste, shell = True)", "File %s already exists\" %sam_file) else: cmd_samtools = \"%s view", "Get next line from file line = fileHandler.readline().strip() # If", "== line): counter += 1 else: line_split = line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n'", "as exc: print ('***ERROR:') print (cmd_paste) print('paste bed sam command", "= 1 previous_line = () # Open file OUT output_file", "+ split_name[0] + '.sam' pilfer_tmp = folder + '/' +", "sys from sys import argv import subprocess ## ARGV if", "file with bedtools bamtobed -i bam_file if (os.path.isfile(bed_file)): print (\"\\t+", "$6}' cmd_paste = \"paste %s %s | awk -v \\\"OFS=\\t\\\"", "else: cmd_samtools = \"%s view %s > %s\" %(samtools_exe, bam_file,", "import subprocess ## ARGV if len (sys.argv) < 5: print", "\\\"OFS=\\t\\\" \\'{print $1, $2, $3, $16, $6}\\' > %s\" %(bed_file,", "break; seq = line.split('\\t')[3] real_seq = seq.split('::PU') seq_len = len(str(real_seq[0]))", "line = previous_line counter += 1 else: line_split = previous_line.split('\\t')", "imports import time import io import os import re import", "previous_line = next_line counter = 1 break; ## close and", "file into PILFER input file\") ## generate bed file with", "line_split[1], line_split[2], line_split[3], counter, line_split[4])) previous_line = next_line counter =", "tmp file if (os.path.isfile(pilfer_tmp)): print (\"\\t+ File %s already exists\"", "ARGV if len (sys.argv) < 5: print (\"\\nUsage:\") print (\"python3", "generate bed file with bedtools bamtobed -i bam_file if (os.path.isfile(bed_file)):", "samtools_bin logfile\\n\" %os.path.realpath(__file__)) exit() bam_file = os.path.abspath(argv[1]) folder = argv[2]", "line_split[2], line_split[3], counter, line_split[4])) #counter += 1 while True: #get", "(\"\\t+ File %s already exists\" %sam_file) else: cmd_samtools = \"%s", "awk -v \\\"OFS=\\t\\\" \\'{print $1, $2, $3, $16, $6}\\' >", "%s' %exc) exit() ## parse pilfer tmp file counter =", "print (cmd_paste) print('paste bed sam command generated an exception: %s'", "next line from file line = fileHandler.readline().strip() # If line", "line_split[2], line_split[3], counter, line_split[4])) previous_line = next_line counter = 1", "File %s already exists\" %bed_file) else: cmd_bedtools = \"%s bamtobed", "## START print (\"\\n+ Converting BAM file into PILFER input", "+ '/' + split_name[0] + '.pilfer.bed' ## START print (\"\\n+", "(cmd_bedtools) print('bedtools command generated an exception: %s' %exc) exit() ##", "line_split[3], counter, line_split[4])) #counter += 1 while True: #get next", "## generate bed file with bedtools bamtobed -i bam_file if", "%s already exists\" %sam_file) else: cmd_samtools = \"%s view %s", "else: line_split = previous_line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter,", "python ## useful imports import time import io import os", "= True) except Exception as exc: print ('***ERROR:') print (cmd_paste)", "%s' %exc) exit() ## generate samtools if (os.path.isfile(sam_file)): print (\"\\t+", "%s' %exc) exit() ## generate paste filter tmp file if", "+ split_name[0] + '.tmp.pilfer.bed' pilfer_file = folder + '/' +", "an exception: %s' %exc) exit() ## generate paste filter tmp", "line_split[1], line_split[2], line_split[3], counter, line_split[4])) #counter += 1 while 
True:", "+ '/' + split_name[0] + '.sam' pilfer_tmp = folder +", "shell = True) except Exception as exc: print ('***ERROR:') print", "output_file.write(cmd_samtools) output_file.write(\"\\n\") try: subprocess.check_output(cmd_samtools, shell = True) except Exception as", "'.pilfer.bed' ## START print (\"\\n+ Converting BAM file into PILFER", "with bedtools bamtobed -i bam_file if (os.path.isfile(bed_file)): print (\"\\t+ File", "cmd_samtools = \"%s view %s > %s\" %(samtools_exe, bam_file, sam_file)", "command generated an exception: %s' %exc) exit() ## generate samtools", "output_file = open(pilfer_file, 'w') # Open file IN fileHandler =", "logfile\\n\" %os.path.realpath(__file__)) exit() bam_file = os.path.abspath(argv[1]) folder = argv[2] bedtools_exe", "+ '.bed' sam_file = folder + '/' + split_name[0] +", "print (\"\\t+ File %s already exists\" %sam_file) else: cmd_samtools =", "print('bedtools command generated an exception: %s' %exc) exit() ## generate", "already exists\" %sam_file) else: cmd_samtools = \"%s view %s >", "= seq.split('::PU') seq_len = len(str(real_seq[0])) ## Discard smaller if (previous_line):", "Aligned.sortedByCoord.out.sam | awk -v \"OFS=\\t\" '{print $1, $2, $3, $16,", "import sys from sys import argv import subprocess ## ARGV", "$16, $6}' cmd_paste = \"paste %s %s | awk -v", "else: line_split = line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter,", "file reached if not line : break; seq = line.split('\\t')[3]", "input file\") ## generate bed file with bedtools bamtobed -i", "not line : break; seq = line.split('\\t')[3] real_seq = seq.split('::PU')", "argv[4] logFile = argv[5] # start output_file = open(logFile, 'a')", "real_seq = seq.split('::PU') seq_len = len(str(real_seq[0])) ## Discard smaller if", "logFile = argv[5] # start output_file = open(logFile, 'a') output_file.write(\"\\nConvert", "exc: print ('***ERROR:') print (cmd_bedtools) print('bedtools command generated an exception:", "counter, line_split[4])) previous_line = next_line counter = 1 break; ##", "bed_file) output_file.write(cmd_bedtools) output_file.write(\"\\n\") try: subprocess.check_output(cmd_bedtools, shell = True) except Exception", "print (\"\\t+ File %s already exists\" %pilfer_tmp) else: ## paste", "%(bedtools_exe, bam_file, bed_file) output_file.write(cmd_bedtools) output_file.write(\"\\n\") try: subprocess.check_output(cmd_bedtools, shell = True)", "print (cmd_samtools) print('samtools view command generated an exception: %s' %exc)", "## parse pilfer tmp file counter = 1 previous_line =", "1 while True: #get next line next_line = fileHandler.readline().strip() if", "reached if not line : break; seq = line.split('\\t')[3] real_seq", "dirname_name = os.path.dirname(bam_file) split_name = os.path.splitext( os.path.basename(bam_file) ) bed_file =", "tmp file counter = 1 previous_line = () # Open", "else: ## paste Aligned.sortedByCoord.out.bed Aligned.sortedByCoord.out.sam | awk -v \"OFS=\\t\" '{print", "+ '.sam' pilfer_tmp = folder + '/' + split_name[0] +", "# Get next line from file line = fileHandler.readline().strip() #", "next line next_line = fileHandler.readline().strip() if (next_line == line): counter", "= open(pilfer_file, 'w') # Open file IN fileHandler = open", "line): line = previous_line counter += 1 else: line_split =", "= fileHandler.readline().strip() if (next_line == line): counter += 1 else:", "from file line = fileHandler.readline().strip() # If line is empty", "exit() bam_file = 
os.path.abspath(argv[1]) folder = argv[2] bedtools_exe = argv[3]", "to Pilfer Input file:\\n\") ## Variables dirname_name = os.path.dirname(bam_file) split_name", "pilfer_tmp) output_file.write(cmd_paste) output_file.write(\"\\n\") try: subprocess.check_output(cmd_paste, shell = True) except Exception", "Exception as exc: print ('***ERROR:') print (cmd_bedtools) print('bedtools command generated", "open(logFile, 'a') output_file.write(\"\\nConvert BAM to Pilfer Input file:\\n\") ## Variables", "+ split_name[0] + '.bed' sam_file = folder + '/' +", "cmd_bedtools = \"%s bamtobed -i %s > %s\" %(bedtools_exe, bam_file,", "line_split = previous_line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4]))", "5: print (\"\\nUsage:\") print (\"python3 %s bam_file folder bedtools_bin samtools_bin", "bed sam command generated an exception: %s' %exc) exit() ##", "generate paste filter tmp file if (os.path.isfile(pilfer_tmp)): print (\"\\t+ File", "bam_file folder bedtools_bin samtools_bin logfile\\n\" %os.path.realpath(__file__)) exit() bam_file = os.path.abspath(argv[1])", "Open file OUT output_file = open(pilfer_file, 'w') # Open file", "an exception: %s' %exc) exit() ## generate samtools if (os.path.isfile(sam_file)):", "argv[3] samtools_exe = argv[4] logFile = argv[5] # start output_file", "of file reached if not line : break; seq =", "exists\" %bed_file) else: cmd_bedtools = \"%s bamtobed -i %s >", "generated an exception: %s' %exc) exit() ## generate paste filter", "+= 1 else: line_split = previous_line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1], line_split[2],", "%s\" %(samtools_exe, bam_file, sam_file) output_file.write(cmd_samtools) output_file.write(\"\\n\") try: subprocess.check_output(cmd_samtools, shell =", "'w') # Open file IN fileHandler = open (pilfer_tmp, \"r\")", "output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4])) previous_line = next_line", "fileHandler = open (pilfer_tmp, \"r\") while True: # Get next", "OUT output_file = open(pilfer_file, 'w') # Open file IN fileHandler", "> %s\" %(samtools_exe, bam_file, sam_file) output_file.write(cmd_samtools) output_file.write(\"\\n\") try: subprocess.check_output(cmd_samtools, shell", "IN fileHandler = open (pilfer_tmp, \"r\") while True: # Get", "+ '/' + split_name[0] + '.tmp.pilfer.bed' pilfer_file = folder +", "sam_file, pilfer_tmp) output_file.write(cmd_paste) output_file.write(\"\\n\") try: subprocess.check_output(cmd_paste, shell = True) except", "+= 1 else: line_split = line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1], line_split[2],", "folder = argv[2] bedtools_exe = argv[3] samtools_exe = argv[4] logFile", "print (\"\\n+ Converting BAM file into PILFER input file\") ##", "%exc) exit() ## parse pilfer tmp file counter = 1", "BAM to Pilfer Input file:\\n\") ## Variables dirname_name = os.path.dirname(bam_file)", "import os import re import sys from sys import argv", "%s\" %(bed_file, sam_file, pilfer_tmp) output_file.write(cmd_paste) output_file.write(\"\\n\") try: subprocess.check_output(cmd_paste, shell =", "fileHandler.readline().strip() # If line is empty then end of file", "from sys import argv import subprocess ## ARGV if len", "Exception as exc: print ('***ERROR:') print (cmd_paste) print('paste bed sam", "= line.split('\\t')[3] 
real_seq = seq.split('::PU') seq_len = len(str(real_seq[0])) ## Discard", "$2, $3, $16, $6}\\' > %s\" %(bed_file, sam_file, pilfer_tmp) output_file.write(cmd_paste)", "> %s\" %(bedtools_exe, bam_file, bed_file) output_file.write(cmd_bedtools) output_file.write(\"\\n\") try: subprocess.check_output(cmd_bedtools, shell", "counter, line_split[4])) #counter += 1 while True: #get next line", "+ split_name[0] + '.pilfer.bed' ## START print (\"\\n+ Converting BAM", "sam_file) output_file.write(cmd_samtools) output_file.write(\"\\n\") try: subprocess.check_output(cmd_samtools, shell = True) except Exception", "%exc) exit() ## generate paste filter tmp file if (os.path.isfile(pilfer_tmp)):", "print (\"python3 %s bam_file folder bedtools_bin samtools_bin logfile\\n\" %os.path.realpath(__file__)) exit()", "output_file.write(\"\\nConvert BAM to Pilfer Input file:\\n\") ## Variables dirname_name =", "except Exception as exc: print ('***ERROR:') print (cmd_paste) print('paste bed", "1 previous_line = () # Open file OUT output_file =", "Open file IN fileHandler = open (pilfer_tmp, \"r\") while True:", "open(pilfer_file, 'w') # Open file IN fileHandler = open (pilfer_tmp,", "'a') output_file.write(\"\\nConvert BAM to Pilfer Input file:\\n\") ## Variables dirname_name", "True) except Exception as exc: print ('***ERROR:') print (cmd_bedtools) print('bedtools", "(cmd_paste) print('paste bed sam command generated an exception: %s' %exc)", "('***ERROR:') print (cmd_paste) print('paste bed sam command generated an exception:", "len(str(real_seq[0])) ## Discard smaller if (previous_line): if (previous_line == line):", "then end of file reached if not line : break;", "io import os import re import sys from sys import", "subprocess.check_output(cmd_samtools, shell = True) except Exception as exc: print ('***ERROR:')", "os.path.abspath(argv[1]) folder = argv[2] bedtools_exe = argv[3] samtools_exe = argv[4]", "generated an exception: %s' %exc) exit() ## parse pilfer tmp", "True: # Get next line from file line = fileHandler.readline().strip()", "= fileHandler.readline().strip() # If line is empty then end of", "Pilfer Input file:\\n\") ## Variables dirname_name = os.path.dirname(bam_file) split_name =", "line_split[4])) previous_line = next_line counter = 1 break; ## close", "command generated an exception: %s' %exc) exit() ## parse pilfer", "## useful imports import time import io import os import", "split_name[0] + '.tmp.pilfer.bed' pilfer_file = folder + '/' + split_name[0]", "-v \\\"OFS=\\t\\\" \\'{print $1, $2, $3, $16, $6}\\' > %s\"", "len (sys.argv) < 5: print (\"\\nUsage:\") print (\"python3 %s bam_file", "import time import io import os import re import sys", "= line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4])) previous_line", "print('paste bed sam command generated an exception: %s' %exc) exit()", "already exists\" %bed_file) else: cmd_bedtools = \"%s bamtobed -i %s", "output_file.write(cmd_paste) output_file.write(\"\\n\") try: subprocess.check_output(cmd_paste, shell = True) except Exception as", "= len(str(real_seq[0])) ## Discard smaller if (previous_line): if (previous_line ==", "## Variables dirname_name = os.path.dirname(bam_file) split_name = os.path.splitext( os.path.basename(bam_file) )", "$3, $16, $6}\\' > %s\" %(bed_file, sam_file, pilfer_tmp) output_file.write(cmd_paste) output_file.write(\"\\n\")", "(pilfer_tmp, \"r\") while True: # Get next line from file", "sys import argv import 
subprocess ## ARGV if len (sys.argv)", "exception: %s' %exc) exit() ## parse pilfer tmp file counter", "line): counter += 1 else: line_split = line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0],", "try: subprocess.check_output(cmd_paste, shell = True) except Exception as exc: print", "exc: print ('***ERROR:') print (cmd_samtools) print('samtools view command generated an", "folder + '/' + split_name[0] + '.tmp.pilfer.bed' pilfer_file = folder", "filter tmp file if (os.path.isfile(pilfer_tmp)): print (\"\\t+ File %s already", "If line is empty then end of file reached if", "| awk -v \"OFS=\\t\" '{print $1, $2, $3, $16, $6}'", "%s already exists\" %bed_file) else: cmd_bedtools = \"%s bamtobed -i", "previous_line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4])) #counter +=", "folder + '/' + split_name[0] + '.pilfer.bed' ## START print", "is empty then end of file reached if not line", "= \"%s view %s > %s\" %(samtools_exe, bam_file, sam_file) output_file.write(cmd_samtools)", "import re import sys from sys import argv import subprocess", "sam_file = folder + '/' + split_name[0] + '.sam' pilfer_tmp", "= folder + '/' + split_name[0] + '.tmp.pilfer.bed' pilfer_file =", "if (next_line == line): counter += 1 else: line_split =", "pilfer tmp file counter = 1 previous_line = () #", "generate samtools if (os.path.isfile(sam_file)): print (\"\\t+ File %s already exists\"", "print ('***ERROR:') print (cmd_paste) print('paste bed sam command generated an", "seq = line.split('\\t')[3] real_seq = seq.split('::PU') seq_len = len(str(real_seq[0])) ##", "file line = fileHandler.readline().strip() # If line is empty then", "exit() ## generate samtools if (os.path.isfile(sam_file)): print (\"\\t+ File %s", "exit() ## generate paste filter tmp file if (os.path.isfile(pilfer_tmp)): print", "bamtobed -i bam_file if (os.path.isfile(bed_file)): print (\"\\t+ File %s already", "split_name[0] + '.pilfer.bed' ## START print (\"\\n+ Converting BAM file", "print ('***ERROR:') print (cmd_samtools) print('samtools view command generated an exception:", "= open(logFile, 'a') output_file.write(\"\\nConvert BAM to Pilfer Input file:\\n\") ##", "= argv[5] # start output_file = open(logFile, 'a') output_file.write(\"\\nConvert BAM", "< 5: print (\"\\nUsage:\") print (\"python3 %s bam_file folder bedtools_bin", "(\"python3 %s bam_file folder bedtools_bin samtools_bin logfile\\n\" %os.path.realpath(__file__)) exit() bam_file", "('***ERROR:') print (cmd_bedtools) print('bedtools command generated an exception: %s' %exc)", "file\") ## generate bed file with bedtools bamtobed -i bam_file", "#get next line next_line = fileHandler.readline().strip() if (next_line == line):", "an exception: %s' %exc) exit() ## parse pilfer tmp file", "next_line = fileHandler.readline().strip() if (next_line == line): counter += 1", "output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4])) #counter += 1", "previous_line counter += 1 else: line_split = previous_line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0],", "line from file line = fileHandler.readline().strip() # If line is", "subprocess.check_output(cmd_bedtools, shell = True) except Exception as exc: print ('***ERROR:')", "if (os.path.isfile(sam_file)): print (\"\\t+ File %s already exists\" %sam_file) else:", "%s | awk -v 
\\\"OFS=\\t\\\" \\'{print $1, $2, $3, $16,", "START print (\"\\n+ Converting BAM file into PILFER input file\")", "bam_file = os.path.abspath(argv[1]) folder = argv[2] bedtools_exe = argv[3] samtools_exe", "bed_file = folder + '/' + split_name[0] + '.bed' sam_file", "split_name[0] + '.bed' sam_file = folder + '/' + split_name[0]", "Variables dirname_name = os.path.dirname(bam_file) split_name = os.path.splitext( os.path.basename(bam_file) ) bed_file", "as exc: print ('***ERROR:') print (cmd_bedtools) print('bedtools command generated an", "$2, $3, $16, $6}' cmd_paste = \"paste %s %s |", "if (previous_line): if (previous_line == line): line = previous_line counter", "%(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4])) previous_line = next_line counter", "File %s already exists\" %pilfer_tmp) else: ## paste Aligned.sortedByCoord.out.bed Aligned.sortedByCoord.out.sam", "import io import os import re import sys from sys", "Exception as exc: print ('***ERROR:') print (cmd_samtools) print('samtools view command", "bam_file, bed_file) output_file.write(cmd_bedtools) output_file.write(\"\\n\") try: subprocess.check_output(cmd_bedtools, shell = True) except", "'.tmp.pilfer.bed' pilfer_file = folder + '/' + split_name[0] + '.pilfer.bed'", "cmd_paste = \"paste %s %s | awk -v \\\"OFS=\\t\\\" \\'{print", "output_file.write(\"\\n\") try: subprocess.check_output(cmd_paste, shell = True) except Exception as exc:", "subprocess.check_output(cmd_paste, shell = True) except Exception as exc: print ('***ERROR:')", "if (os.path.isfile(bed_file)): print (\"\\t+ File %s already exists\" %bed_file) else:", "time import io import os import re import sys from", "$1, $2, $3, $16, $6}\\' > %s\" %(bed_file, sam_file, pilfer_tmp)", "print ('***ERROR:') print (cmd_bedtools) print('bedtools command generated an exception: %s'", "argv[2] bedtools_exe = argv[3] samtools_exe = argv[4] logFile = argv[5]", "argv import subprocess ## ARGV if len (sys.argv) < 5:", "file if (os.path.isfile(pilfer_tmp)): print (\"\\t+ File %s already exists\" %pilfer_tmp)", "line.split('\\t')[3] real_seq = seq.split('::PU') seq_len = len(str(real_seq[0])) ## Discard smaller", "folder + '/' + split_name[0] + '.bed' sam_file = folder", "exit() ## parse pilfer tmp file counter = 1 previous_line", "(previous_line == line): line = previous_line counter += 1 else:", "= folder + '/' + split_name[0] + '.bed' sam_file =", "split_name[0] + '.sam' pilfer_tmp = folder + '/' + split_name[0]", "except Exception as exc: print ('***ERROR:') print (cmd_bedtools) print('bedtools command", "1 else: line_split = line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1], line_split[2], line_split[3],", "%s already exists\" %pilfer_tmp) else: ## paste Aligned.sortedByCoord.out.bed Aligned.sortedByCoord.out.sam |", "file:\\n\") ## Variables dirname_name = os.path.dirname(bam_file) split_name = os.path.splitext( os.path.basename(bam_file)", "exists\" %pilfer_tmp) else: ## paste Aligned.sortedByCoord.out.bed Aligned.sortedByCoord.out.sam | awk -v", "re import sys from sys import argv import subprocess ##", "## ARGV if len (sys.argv) < 5: print (\"\\nUsage:\") print", "try: subprocess.check_output(cmd_samtools, shell = True) except Exception as exc: print", "view command generated an exception: %s' %exc) exit() ## generate", "-v \"OFS=\\t\" '{print $1, $2, $3, $16, $6}' cmd_paste =", "except Exception as exc: print ('***ERROR:') print (cmd_samtools) print('samtools view", "pilfer_tmp = folder 
+ '/' + split_name[0] + '.tmp.pilfer.bed' pilfer_file", "subprocess ## ARGV if len (sys.argv) < 5: print (\"\\nUsage:\")", "%sam_file) else: cmd_samtools = \"%s view %s > %s\" %(samtools_exe,", "'/' + split_name[0] + '.sam' pilfer_tmp = folder + '/'", "= open (pilfer_tmp, \"r\") while True: # Get next line", "fileHandler.readline().strip() if (next_line == line): counter += 1 else: line_split", "(os.path.isfile(pilfer_tmp)): print (\"\\t+ File %s already exists\" %pilfer_tmp) else: ##", "== line): line = previous_line counter += 1 else: line_split", "= () # Open file OUT output_file = open(pilfer_file, 'w')", "seq_len = len(str(real_seq[0])) ## Discard smaller if (previous_line): if (previous_line", "print (cmd_bedtools) print('bedtools command generated an exception: %s' %exc) exit()", "if (previous_line == line): line = previous_line counter += 1", "True) except Exception as exc: print ('***ERROR:') print (cmd_samtools) print('samtools", "## paste Aligned.sortedByCoord.out.bed Aligned.sortedByCoord.out.sam | awk -v \"OFS=\\t\" '{print $1,", "line is empty then end of file reached if not", "$6}\\' > %s\" %(bed_file, sam_file, pilfer_tmp) output_file.write(cmd_paste) output_file.write(\"\\n\") try: subprocess.check_output(cmd_paste,", "(previous_line): if (previous_line == line): line = previous_line counter +=", "file OUT output_file = open(pilfer_file, 'w') # Open file IN", "folder bedtools_bin samtools_bin logfile\\n\" %os.path.realpath(__file__)) exit() bam_file = os.path.abspath(argv[1]) folder", "exception: %s' %exc) exit() ## generate samtools if (os.path.isfile(sam_file)): print", "file IN fileHandler = open (pilfer_tmp, \"r\") while True: #", "already exists\" %pilfer_tmp) else: ## paste Aligned.sortedByCoord.out.bed Aligned.sortedByCoord.out.sam | awk", "= folder + '/' + split_name[0] + '.pilfer.bed' ## START", "= os.path.dirname(bam_file) split_name = os.path.splitext( os.path.basename(bam_file) ) bed_file = folder", "%s > %s\" %(bedtools_exe, bam_file, bed_file) output_file.write(cmd_bedtools) output_file.write(\"\\n\") try: subprocess.check_output(cmd_bedtools,", "os.path.splitext( os.path.basename(bam_file) ) bed_file = folder + '/' + split_name[0]", "= argv[3] samtools_exe = argv[4] logFile = argv[5] # start", "= next_line counter = 1 break; ## close and finish", "Input file:\\n\") ## Variables dirname_name = os.path.dirname(bam_file) split_name = os.path.splitext(", "(os.path.isfile(bed_file)): print (\"\\t+ File %s already exists\" %bed_file) else: cmd_bedtools", "sam command generated an exception: %s' %exc) exit() ## parse", "awk -v \"OFS=\\t\" '{print $1, $2, $3, $16, $6}' cmd_paste", "%pilfer_tmp) else: ## paste Aligned.sortedByCoord.out.bed Aligned.sortedByCoord.out.sam | awk -v \"OFS=\\t\"", "bedtools_exe = argv[3] samtools_exe = argv[4] logFile = argv[5] #", "file counter = 1 previous_line = () # Open file", "True: #get next line next_line = fileHandler.readline().strip() if (next_line ==", "print('samtools view command generated an exception: %s' %exc) exit() ##", "%(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4])) #counter += 1 while", "Converting BAM file into PILFER input file\") ## generate bed", "%s\" %(bedtools_exe, bam_file, bed_file) output_file.write(cmd_bedtools) output_file.write(\"\\n\") try: subprocess.check_output(cmd_bedtools, shell =", "> %s\" %(bed_file, sam_file, pilfer_tmp) output_file.write(cmd_paste) output_file.write(\"\\n\") try: subprocess.check_output(cmd_paste, shell", "line_split[3], counter, 
line_split[4])) previous_line = next_line counter = 1 break;", "| awk -v \\\"OFS=\\t\\\" \\'{print $1, $2, $3, $16, $6}\\'", "$16, $6}\\' > %s\" %(bed_file, sam_file, pilfer_tmp) output_file.write(cmd_paste) output_file.write(\"\\n\") try:", "Discard smaller if (previous_line): if (previous_line == line): line =", "'.bed' sam_file = folder + '/' + split_name[0] + '.sam'", "'/' + split_name[0] + '.tmp.pilfer.bed' pilfer_file = folder + '/'", "useful imports import time import io import os import re", "# If line is empty then end of file reached", "= previous_line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4])) #counter", "print (\"\\t+ File %s already exists\" %bed_file) else: cmd_bedtools =", "as exc: print ('***ERROR:') print (cmd_samtools) print('samtools view command generated", "\"%s bamtobed -i %s > %s\" %(bedtools_exe, bam_file, bed_file) output_file.write(cmd_bedtools)", "paste filter tmp file if (os.path.isfile(pilfer_tmp)): print (\"\\t+ File %s", "(cmd_samtools) print('samtools view command generated an exception: %s' %exc) exit()", "command generated an exception: %s' %exc) exit() ## generate paste", "exists\" %sam_file) else: cmd_samtools = \"%s view %s > %s\"", "previous_line = () # Open file OUT output_file = open(pilfer_file,", "counter = 1 previous_line = () # Open file OUT", "1 else: line_split = previous_line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1], line_split[2], line_split[3],", "= True) except Exception as exc: print ('***ERROR:') print (cmd_bedtools)", "+ '/' + split_name[0] + '.bed' sam_file = folder +", "%os.path.realpath(__file__)) exit() bam_file = os.path.abspath(argv[1]) folder = argv[2] bedtools_exe =", "= argv[2] bedtools_exe = argv[3] samtools_exe = argv[4] logFile =", "= argv[4] logFile = argv[5] # start output_file = open(logFile,", "into PILFER input file\") ## generate bed file with bedtools", "(\"\\n+ Converting BAM file into PILFER input file\") ## generate", ": break; seq = line.split('\\t')[3] real_seq = seq.split('::PU') seq_len =", "seq.split('::PU') seq_len = len(str(real_seq[0])) ## Discard smaller if (previous_line): if", "counter += 1 else: line_split = previous_line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1],", "samtools_exe = argv[4] logFile = argv[5] # start output_file =", "BAM file into PILFER input file\") ## generate bed file", "empty then end of file reached if not line :", "exc: print ('***ERROR:') print (cmd_paste) print('paste bed sam command generated", "if len (sys.argv) < 5: print (\"\\nUsage:\") print (\"python3 %s", "os.path.dirname(bam_file) split_name = os.path.splitext( os.path.basename(bam_file) ) bed_file = folder +", "end of file reached if not line : break; seq", "line : break; seq = line.split('\\t')[3] real_seq = seq.split('::PU') seq_len", "line_split = line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4]))", "generated an exception: %s' %exc) exit() ## generate samtools if", ") bed_file = folder + '/' + split_name[0] + '.bed'", "import argv import subprocess ## ARGV if len (sys.argv) <", "counter += 1 else: line_split = line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1],", "if (os.path.isfile(pilfer_tmp)): print (\"\\t+ File %s already exists\" 
%pilfer_tmp) else:", "%s %s | awk -v \\\"OFS=\\t\\\" \\'{print $1, $2, $3,", "else: cmd_bedtools = \"%s bamtobed -i %s > %s\" %(bedtools_exe,", "\"OFS=\\t\" '{print $1, $2, $3, $16, $6}' cmd_paste = \"paste", "%(samtools_exe, bam_file, sam_file) output_file.write(cmd_samtools) output_file.write(\"\\n\") try: subprocess.check_output(cmd_samtools, shell = True)", "+ '.tmp.pilfer.bed' pilfer_file = folder + '/' + split_name[0] +", "## generate samtools if (os.path.isfile(sam_file)): print (\"\\t+ File %s already", "Aligned.sortedByCoord.out.bed Aligned.sortedByCoord.out.sam | awk -v \"OFS=\\t\" '{print $1, $2, $3,", "+= 1 while True: #get next line next_line = fileHandler.readline().strip()", "#usr/bin/env python ## useful imports import time import io import", "os.path.basename(bam_file) ) bed_file = folder + '/' + split_name[0] +", "## Discard smaller if (previous_line): if (previous_line == line): line", "smaller if (previous_line): if (previous_line == line): line = previous_line", "$3, $16, $6}' cmd_paste = \"paste %s %s | awk", "'.sam' pilfer_tmp = folder + '/' + split_name[0] + '.tmp.pilfer.bed'", "argv[5] # start output_file = open(logFile, 'a') output_file.write(\"\\nConvert BAM to", "## generate paste filter tmp file if (os.path.isfile(pilfer_tmp)): print (\"\\t+", "(\"\\t+ File %s already exists\" %bed_file) else: cmd_bedtools = \"%s", "= folder + '/' + split_name[0] + '.sam' pilfer_tmp =", "output_file.write(cmd_bedtools) output_file.write(\"\\n\") try: subprocess.check_output(cmd_bedtools, shell = True) except Exception as", "output_file.write(\"\\n\") try: subprocess.check_output(cmd_samtools, shell = True) except Exception as exc:", "(next_line == line): counter += 1 else: line_split = line.split('\\t')", "line.split('\\t') output_file.write('%s\\t%s\\t%s\\t%s::PI\\t%s\\t%s\\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4])) previous_line =", "(os.path.isfile(sam_file)): print (\"\\t+ File %s already exists\" %sam_file) else: cmd_samtools", "\"paste %s %s | awk -v \\\"OFS=\\t\\\" \\'{print $1, $2,", "%s bam_file folder bedtools_bin samtools_bin logfile\\n\" %os.path.realpath(__file__)) exit() bam_file =", "split_name = os.path.splitext( os.path.basename(bam_file) ) bed_file = folder + '/'", "True) except Exception as exc: print ('***ERROR:') print (cmd_paste) print('paste", "while True: # Get next line from file line =", "bamtobed -i %s > %s\" %(bedtools_exe, bam_file, bed_file) output_file.write(cmd_bedtools) output_file.write(\"\\n\")", "\"%s view %s > %s\" %(samtools_exe, bam_file, sam_file) output_file.write(cmd_samtools) output_file.write(\"\\n\")", "= os.path.splitext( os.path.basename(bam_file) ) bed_file = folder + '/' +", "while True: #get next line next_line = fileHandler.readline().strip() if (next_line", "try: subprocess.check_output(cmd_bedtools, shell = True) except Exception as exc: print", "-i %s > %s\" %(bedtools_exe, bam_file, bed_file) output_file.write(cmd_bedtools) output_file.write(\"\\n\") try:", "line = fileHandler.readline().strip() # If line is empty then end", "exception: %s' %exc) exit() ## generate paste filter tmp file", "counter = 1 break; ## close and finish fileHandler.close() output_file.close()", "'/' + split_name[0] + '.pilfer.bed' ## START print (\"\\n+ Converting", "\\'{print $1, $2, $3, $16, $6}\\' > %s\" %(bed_file, sam_file,", "bed file with bedtools bamtobed -i bam_file if (os.path.isfile(bed_file)): print", "'{print $1, $2, $3, $16, $6}' cmd_paste = \"paste %s" ]
[ "the butter was bitter so betty bought a better butter", "a=\"<NAME>ought a butter the butter was bitter so betty bought", "bought a better butter which was not bitter\" v=[a[-1] for", "better butter which was not bitter\" v=[a[-1] for a in", "was bitter so betty bought a better butter which was", "a butter the butter was bitter so betty bought a", "so betty bought a better butter which was not bitter\"", "was not bitter\" v=[a[-1] for a in a.split() if(len(a)%2==0)] print(v)", "butter which was not bitter\" v=[a[-1] for a in a.split()", "butter was bitter so betty bought a better butter which", "butter the butter was bitter so betty bought a better", "betty bought a better butter which was not bitter\" v=[a[-1]", "bitter so betty bought a better butter which was not", "which was not bitter\" v=[a[-1] for a in a.split() if(len(a)%2==0)]", "a better butter which was not bitter\" v=[a[-1] for a" ]
[ "Point, WritePrecision class Reader: def __init__(self, host, port, token, organization,", "= \"%d/%m/%Y %H:%M:%S\" level = logging.INFO if (verbosity): level =", "based on InfluxDB This implementation does its best to follow", "self.__token = token self.__organization = organization self.__bucket = bucket self.__mutex", "(verbosity): level = logging.DEBUG logging.basicConfig(filename=filename, filemode='a', format=format, level=level, datefmt=datefmt) def", "logging.DEBUG logging.basicConfig(filename=filename, filemode='a', format=format, level=level, datefmt=datefmt) def setup(self): self.__reader =", "raw_data['tags'], \"fields\": raw_data['fields'], \"time\": raw_data['time'] } ] write_api.write(bucket, organization, data)", "organization, bucket): self.__mutex.acquire() q = persistqueue.SQLiteQueue('data', multithreading=True, auto_commit=True) self.__mutex.release() client", "= threading.Thread( target = self.__reader_job, args = (self.__url, self.__token, self.__organization,", "if (verbosity): level = logging.DEBUG logging.basicConfig(filename=filename, filemode='a', format=format, level=level, datefmt=datefmt)", "guidelines. The comments follows the Google Python Style Guide: https://github.com/google/styleguide/blob/gh-pages/pyguide.md", "token self.__organization = organization self.__bucket = bucket self.__mutex = mutex", "self.__reader = None self.__setup_logging(verbosity) def __setup_logging(self, verbosity): format = \"%(asctime)s", "mutex, verbosity): self.__url = \"http://%s:%s\" % (host, port) self.__token =", "multithreading=True, auto_commit=True) self.__mutex.release() client = InfluxDBClient(url=url, token=token) write_api = client.write_api(write_options=SYNCHRONOUS)", "token, organization, bucket): self.__mutex.acquire() q = persistqueue.SQLiteQueue('data', multithreading=True, auto_commit=True) self.__mutex.release()", "Guide: https://github.com/google/styleguide/blob/gh-pages/pyguide.md \"\"\" __copyright__ = 'Copyright 2021, FCRlab at University", "self.__bucket = bucket self.__mutex = mutex self.__reader = None self.__setup_logging(verbosity)", "\"tags\": raw_data['tags'], \"fields\": raw_data['fields'], \"time\": raw_data['time'] } ] write_api.write(bucket, organization,", "class based on InfluxDB This implementation does its best to", "= None self.__setup_logging(verbosity) def __setup_logging(self, verbosity): format = \"%(asctime)s %(filename)s:%(lineno)d", "import InfluxDBClient, Point, WritePrecision class Reader: def __init__(self, host, port,", "= 'Writer class based on InfluxDB' import time import logging", "mutex self.__reader = None self.__setup_logging(verbosity) def __setup_logging(self, verbosity): format =", "the Google Python Style Guide: https://github.com/google/styleguide/blob/gh-pages/pyguide.md \"\"\" __copyright__ = 'Copyright", "filename='log/mqtt2influx.log' datefmt = \"%d/%m/%Y %H:%M:%S\" level = logging.INFO if (verbosity):", "utf-8 -*- #!/usr/bin/env python \"\"\"Writer class based on InfluxDB This", "'Writer class based on InfluxDB' import time import logging import", "q.get() logging.debug(\"Just got new data\") logging.debug(\"Parsing data points\") data =", "got new data\") logging.debug(\"Parsing data points\") data = [ {", "bucket, mutex, verbosity): self.__url = \"http://%s:%s\" % (host, port) self.__token", "token=token) write_api = client.write_api(write_options=SYNCHRONOUS) try: while (True): raw_data = q.get()", "= \"http://%s:%s\" % (host, port) self.__token = token self.__organization =", "format=format, level=level, 
datefmt=datefmt) def setup(self): self.__reader = threading.Thread( target =", "= InfluxDBClient(url=url, token=token) write_api = client.write_api(write_options=SYNCHRONOUS) try: while (True): raw_data", "client.write_api(write_options=SYNCHRONOUS) try: while (True): raw_data = q.get() logging.debug(\"Just got new", "https://github.com/google/styleguide/blob/gh-pages/pyguide.md \"\"\" __copyright__ = 'Copyright 2021, FCRlab at University of", "on InfluxDB This implementation does its best to follow the", "\"http://%s:%s\" % (host, port) self.__token = token self.__organization = organization", "organization self.__bucket = bucket self.__mutex = mutex self.__reader = None", "logging.basicConfig(filename=filename, filemode='a', format=format, level=level, datefmt=datefmt) def setup(self): self.__reader = threading.Thread(", "\"%d/%m/%Y %H:%M:%S\" level = logging.INFO if (verbosity): level = logging.DEBUG", "Google Python Style Guide: https://github.com/google/styleguide/blob/gh-pages/pyguide.md \"\"\" __copyright__ = 'Copyright 2021,", "FCRlab at University of Messina' __author__ = '<NAME> <<EMAIL>>' __credits__", "%(levelname)s - %(message)s\" filename='log/mqtt2influx.log' datefmt = \"%d/%m/%Y %H:%M:%S\" level =", "Clean code guidelines. The comments follows the Google Python Style", "coding: utf-8 -*- #!/usr/bin/env python \"\"\"Writer class based on InfluxDB", "from influxdb_client import InfluxDBClient, Point, WritePrecision class Reader: def __init__(self,", "= q.get() logging.debug(\"Just got new data\") logging.debug(\"Parsing data points\") data", "This implementation does its best to follow the Robert Martin's", "self.__organization, self.__bucket) ) def __reader_job(self, url, token, organization, bucket): self.__mutex.acquire()", "\"time\": raw_data['time'] } ] write_api.write(bucket, organization, data) logging.info(\"Data into InfluxDB\")", "self.__setup_logging(verbosity) def __setup_logging(self, verbosity): format = \"%(asctime)s %(filename)s:%(lineno)d %(levelname)s -", "Robert Martin's Clean code guidelines. 
The comments follows the Google", "- %(message)s\" filename='log/mqtt2influx.log' datefmt = \"%d/%m/%Y %H:%M:%S\" level = logging.INFO", "__init__(self, host, port, token, organization, bucket, mutex, verbosity): self.__url =", "self.__token, self.__organization, self.__bucket) ) def __reader_job(self, url, token, organization, bucket):", "try: while (True): raw_data = q.get() logging.debug(\"Just got new data\")", "q = persistqueue.SQLiteQueue('data', multithreading=True, auto_commit=True) self.__mutex.release() client = InfluxDBClient(url=url, token=token)", "WritePrecision class Reader: def __init__(self, host, port, token, organization, bucket,", "filemode='a', format=format, level=level, datefmt=datefmt) def setup(self): self.__reader = threading.Thread( target", "= [ { \"measurement\": raw_data['measurement'], \"tags\": raw_data['tags'], \"fields\": raw_data['fields'], \"time\":", "at University of Messina' __author__ = '<NAME> <<EMAIL>>' __credits__ =", ") def __reader_job(self, url, token, organization, bucket): self.__mutex.acquire() q =", "InfluxDB This implementation does its best to follow the Robert", "# -*- coding: utf-8 -*- #!/usr/bin/env python \"\"\"Writer class based", "level = logging.DEBUG logging.basicConfig(filename=filename, filemode='a', format=format, level=level, datefmt=datefmt) def setup(self):", "The comments follows the Google Python Style Guide: https://github.com/google/styleguide/blob/gh-pages/pyguide.md \"\"\"", "best to follow the Robert Martin's Clean code guidelines. The", "format = \"%(asctime)s %(filename)s:%(lineno)d %(levelname)s - %(message)s\" filename='log/mqtt2influx.log' datefmt =", "2021, FCRlab at University of Messina' __author__ = '<NAME> <<EMAIL>>'", "data = [ { \"measurement\": raw_data['measurement'], \"tags\": raw_data['tags'], \"fields\": raw_data['fields'],", "host, port, token, organization, bucket, mutex, verbosity): self.__url = \"http://%s:%s\"", "(True): raw_data = q.get() logging.debug(\"Just got new data\") logging.debug(\"Parsing data", "token, organization, bucket, mutex, verbosity): self.__url = \"http://%s:%s\" % (host,", "\"\"\" __copyright__ = 'Copyright 2021, FCRlab at University of Messina'", "__copyright__ = 'Copyright 2021, FCRlab at University of Messina' __author__", "'Copyright 2021, FCRlab at University of Messina' __author__ = '<NAME>", "threading import persistqueue from datetime import datetime from influxdb_client.client.write_api import", "import datetime from influxdb_client.client.write_api import SYNCHRONOUS from influxdb_client import InfluxDBClient,", "= logging.DEBUG logging.basicConfig(filename=filename, filemode='a', format=format, level=level, datefmt=datefmt) def setup(self): self.__reader", "the Robert Martin's Clean code guidelines. 
The comments follows the", "write_api = client.write_api(write_options=SYNCHRONOUS) try: while (True): raw_data = q.get() logging.debug(\"Just", "port, token, organization, bucket, mutex, verbosity): self.__url = \"http://%s:%s\" %", "\"\"\"Writer class based on InfluxDB This implementation does its best", "InfluxDB' import time import logging import threading import persistqueue from", "logging.debug(\"Parsing data points\") data = [ { \"measurement\": raw_data['measurement'], \"tags\":", "#!/usr/bin/env python \"\"\"Writer class based on InfluxDB This implementation does", "comments follows the Google Python Style Guide: https://github.com/google/styleguide/blob/gh-pages/pyguide.md \"\"\" __copyright__", "Style Guide: https://github.com/google/styleguide/blob/gh-pages/pyguide.md \"\"\" __copyright__ = 'Copyright 2021, FCRlab at", "datetime from influxdb_client.client.write_api import SYNCHRONOUS from influxdb_client import InfluxDBClient, Point,", "organization, bucket, mutex, verbosity): self.__url = \"http://%s:%s\" % (host, port)", "threading.Thread( target = self.__reader_job, args = (self.__url, self.__token, self.__organization, self.__bucket)", "time import logging import threading import persistqueue from datetime import", "setup(self): self.__reader = threading.Thread( target = self.__reader_job, args = (self.__url,", "while (True): raw_data = q.get() logging.debug(\"Just got new data\") logging.debug(\"Parsing", "\"measurement\": raw_data['measurement'], \"tags\": raw_data['tags'], \"fields\": raw_data['fields'], \"time\": raw_data['time'] } ]", "= logging.INFO if (verbosity): level = logging.DEBUG logging.basicConfig(filename=filename, filemode='a', format=format,", "raw_data['time'] } ] write_api.write(bucket, organization, data) logging.info(\"Data into InfluxDB\") time.sleep(0.3)", "<<EMAIL>>' __credits__ = '' __description__ = 'Writer class based on", "import logging import threading import persistqueue from datetime import datetime", "import persistqueue from datetime import datetime from influxdb_client.client.write_api import SYNCHRONOUS", "% (host, port) self.__token = token self.__organization = organization self.__bucket", "def setup(self): self.__reader = threading.Thread( target = self.__reader_job, args =", "-*- #!/usr/bin/env python \"\"\"Writer class based on InfluxDB This implementation", "def __init__(self, host, port, token, organization, bucket, mutex, verbosity): self.__url", "based on InfluxDB' import time import logging import threading import", "self.__bucket) ) def __reader_job(self, url, token, organization, bucket): self.__mutex.acquire() q", "InfluxDBClient(url=url, token=token) write_api = client.write_api(write_options=SYNCHRONOUS) try: while (True): raw_data =", "influxdb_client import InfluxDBClient, Point, WritePrecision class Reader: def __init__(self, host,", "SYNCHRONOUS from influxdb_client import InfluxDBClient, Point, WritePrecision class Reader: def", "def __reader_job(self, url, token, organization, bucket): self.__mutex.acquire() q = persistqueue.SQLiteQueue('data',", "%(message)s\" filename='log/mqtt2influx.log' datefmt = \"%d/%m/%Y %H:%M:%S\" level = logging.INFO if", "auto_commit=True) self.__mutex.release() client = InfluxDBClient(url=url, token=token) write_api = client.write_api(write_options=SYNCHRONOUS) try:", "= 'Copyright 2021, FCRlab at University of Messina' __author__ =", "InfluxDBClient, Point, WritePrecision class Reader: def __init__(self, host, port, token,", "__author__ = '<NAME> <<EMAIL>>' __credits__ = '' 
__description__ = 'Writer", "= token self.__organization = organization self.__bucket = bucket self.__mutex =", "logging.debug(\"Just got new data\") logging.debug(\"Parsing data points\") data = [", "persistqueue from datetime import datetime from influxdb_client.client.write_api import SYNCHRONOUS from", "new data\") logging.debug(\"Parsing data points\") data = [ { \"measurement\":", "args = (self.__url, self.__token, self.__organization, self.__bucket) ) def __reader_job(self, url,", "= mutex self.__reader = None self.__setup_logging(verbosity) def __setup_logging(self, verbosity): format", "target = self.__reader_job, args = (self.__url, self.__token, self.__organization, self.__bucket) )", "'' __description__ = 'Writer class based on InfluxDB' import time", "= client.write_api(write_options=SYNCHRONOUS) try: while (True): raw_data = q.get() logging.debug(\"Just got", "self.__reader = threading.Thread( target = self.__reader_job, args = (self.__url, self.__token,", "from datetime import datetime from influxdb_client.client.write_api import SYNCHRONOUS from influxdb_client", "to follow the Robert Martin's Clean code guidelines. The comments", "bucket self.__mutex = mutex self.__reader = None self.__setup_logging(verbosity) def __setup_logging(self,", "self.__reader_job, args = (self.__url, self.__token, self.__organization, self.__bucket) ) def __reader_job(self,", "raw_data = q.get() logging.debug(\"Just got new data\") logging.debug(\"Parsing data points\")", "follows the Google Python Style Guide: https://github.com/google/styleguide/blob/gh-pages/pyguide.md \"\"\" __copyright__ =", "} ] write_api.write(bucket, organization, data) logging.info(\"Data into InfluxDB\") time.sleep(0.3) except", "__setup_logging(self, verbosity): format = \"%(asctime)s %(filename)s:%(lineno)d %(levelname)s - %(message)s\" filename='log/mqtt2influx.log'", "%H:%M:%S\" level = logging.INFO if (verbosity): level = logging.DEBUG logging.basicConfig(filename=filename,", "url, token, organization, bucket): self.__mutex.acquire() q = persistqueue.SQLiteQueue('data', multithreading=True, auto_commit=True)", "data\") logging.debug(\"Parsing data points\") data = [ { \"measurement\": raw_data['measurement'],", "%(filename)s:%(lineno)d %(levelname)s - %(message)s\" filename='log/mqtt2influx.log' datefmt = \"%d/%m/%Y %H:%M:%S\" level", "points\") data = [ { \"measurement\": raw_data['measurement'], \"tags\": raw_data['tags'], \"fields\":", "its best to follow the Robert Martin's Clean code guidelines.", "python \"\"\"Writer class based on InfluxDB This implementation does its", "= '<NAME> <<EMAIL>>' __credits__ = '' __description__ = 'Writer class", "datefmt = \"%d/%m/%Y %H:%M:%S\" level = logging.INFO if (verbosity): level", "__reader_job(self, url, token, organization, bucket): self.__mutex.acquire() q = persistqueue.SQLiteQueue('data', multithreading=True,", "None self.__setup_logging(verbosity) def __setup_logging(self, verbosity): format = \"%(asctime)s %(filename)s:%(lineno)d %(levelname)s", "(host, port) self.__token = token self.__organization = organization self.__bucket =", "Messina' __author__ = '<NAME> <<EMAIL>>' __credits__ = '' __description__ =", "(self.__url, self.__token, self.__organization, self.__bucket) ) def __reader_job(self, url, token, organization,", "does its best to follow the Robert Martin's Clean code", "self.__url = \"http://%s:%s\" % (host, port) self.__token = token self.__organization", "client = InfluxDBClient(url=url, token=token) write_api = 
client.write_api(write_options=SYNCHRONOUS) try: while (True):", "data points\") data = [ { \"measurement\": raw_data['measurement'], \"tags\": raw_data['tags'],", "[ { \"measurement\": raw_data['measurement'], \"tags\": raw_data['tags'], \"fields\": raw_data['fields'], \"time\": raw_data['time']", "implementation does its best to follow the Robert Martin's Clean", "-*- coding: utf-8 -*- #!/usr/bin/env python \"\"\"Writer class based on", "__description__ = 'Writer class based on InfluxDB' import time import", "level=level, datefmt=datefmt) def setup(self): self.__reader = threading.Thread( target = self.__reader_job,", "level = logging.INFO if (verbosity): level = logging.DEBUG logging.basicConfig(filename=filename, filemode='a',", "persistqueue.SQLiteQueue('data', multithreading=True, auto_commit=True) self.__mutex.release() client = InfluxDBClient(url=url, token=token) write_api =", "follow the Robert Martin's Clean code guidelines. The comments follows", "= bucket self.__mutex = mutex self.__reader = None self.__setup_logging(verbosity) def", "from influxdb_client.client.write_api import SYNCHRONOUS from influxdb_client import InfluxDBClient, Point, WritePrecision", "logging.info(\"Data into InfluxDB\") time.sleep(0.3) except KeyboardInterrupt: pass def start(self): self.__reader.start()", "class based on InfluxDB' import time import logging import threading", "= (self.__url, self.__token, self.__organization, self.__bucket) ) def __reader_job(self, url, token,", "\"%(asctime)s %(filename)s:%(lineno)d %(levelname)s - %(message)s\" filename='log/mqtt2influx.log' datefmt = \"%d/%m/%Y %H:%M:%S\"", "self.__organization = organization self.__bucket = bucket self.__mutex = mutex self.__reader", "organization, data) logging.info(\"Data into InfluxDB\") time.sleep(0.3) except KeyboardInterrupt: pass def", "= '' __description__ = 'Writer class based on InfluxDB' import", "on InfluxDB' import time import logging import threading import persistqueue", "import SYNCHRONOUS from influxdb_client import InfluxDBClient, Point, WritePrecision class Reader:", "= persistqueue.SQLiteQueue('data', multithreading=True, auto_commit=True) self.__mutex.release() client = InfluxDBClient(url=url, token=token) write_api", "__credits__ = '' __description__ = 'Writer class based on InfluxDB'", "Reader: def __init__(self, host, port, token, organization, bucket, mutex, verbosity):", "import threading import persistqueue from datetime import datetime from influxdb_client.client.write_api", "port) self.__token = token self.__organization = organization self.__bucket = bucket", "def __setup_logging(self, verbosity): format = \"%(asctime)s %(filename)s:%(lineno)d %(levelname)s - %(message)s\"", "raw_data['fields'], \"time\": raw_data['time'] } ] write_api.write(bucket, organization, data) logging.info(\"Data into", "self.__mutex = mutex self.__reader = None self.__setup_logging(verbosity) def __setup_logging(self, verbosity):", "'<NAME> <<EMAIL>>' __credits__ = '' __description__ = 'Writer class based", "University of Messina' __author__ = '<NAME> <<EMAIL>>' __credits__ = ''", "logging.INFO if (verbosity): level = logging.DEBUG logging.basicConfig(filename=filename, filemode='a', format=format, level=level,", "datetime import datetime from influxdb_client.client.write_api import SYNCHRONOUS from influxdb_client import", "datefmt=datefmt) def setup(self): self.__reader = threading.Thread( target = self.__reader_job, args", "self.__mutex.release() client = InfluxDBClient(url=url, token=token) write_api = 
client.write_api(write_options=SYNCHRONOUS) try: while", "] write_api.write(bucket, organization, data) logging.info(\"Data into InfluxDB\") time.sleep(0.3) except KeyboardInterrupt:", "verbosity): format = \"%(asctime)s %(filename)s:%(lineno)d %(levelname)s - %(message)s\" filename='log/mqtt2influx.log' datefmt", "logging import threading import persistqueue from datetime import datetime from", "Martin's Clean code guidelines. The comments follows the Google Python", "= \"%(asctime)s %(filename)s:%(lineno)d %(levelname)s - %(message)s\" filename='log/mqtt2influx.log' datefmt = \"%d/%m/%Y", "raw_data['measurement'], \"tags\": raw_data['tags'], \"fields\": raw_data['fields'], \"time\": raw_data['time'] } ] write_api.write(bucket,", "{ \"measurement\": raw_data['measurement'], \"tags\": raw_data['tags'], \"fields\": raw_data['fields'], \"time\": raw_data['time'] }", "\"fields\": raw_data['fields'], \"time\": raw_data['time'] } ] write_api.write(bucket, organization, data) logging.info(\"Data", "code guidelines. The comments follows the Google Python Style Guide:", "bucket): self.__mutex.acquire() q = persistqueue.SQLiteQueue('data', multithreading=True, auto_commit=True) self.__mutex.release() client =", "Python Style Guide: https://github.com/google/styleguide/blob/gh-pages/pyguide.md \"\"\" __copyright__ = 'Copyright 2021, FCRlab", "import time import logging import threading import persistqueue from datetime", "verbosity): self.__url = \"http://%s:%s\" % (host, port) self.__token = token", "write_api.write(bucket, organization, data) logging.info(\"Data into InfluxDB\") time.sleep(0.3) except KeyboardInterrupt: pass", "of Messina' __author__ = '<NAME> <<EMAIL>>' __credits__ = '' __description__", "= self.__reader_job, args = (self.__url, self.__token, self.__organization, self.__bucket) ) def", "= organization self.__bucket = bucket self.__mutex = mutex self.__reader =", "influxdb_client.client.write_api import SYNCHRONOUS from influxdb_client import InfluxDBClient, Point, WritePrecision class", "self.__mutex.acquire() q = persistqueue.SQLiteQueue('data', multithreading=True, auto_commit=True) self.__mutex.release() client = InfluxDBClient(url=url,", "data) logging.info(\"Data into InfluxDB\") time.sleep(0.3) except KeyboardInterrupt: pass def start(self):", "class Reader: def __init__(self, host, port, token, organization, bucket, mutex,", "<reponame>lcarnevale/proxy-mqtt2influx<filename>app/reader.py<gh_stars>0 # -*- coding: utf-8 -*- #!/usr/bin/env python \"\"\"Writer class" ]
[ "{ENV['m']}x{ENV['n']}-grid is tilable. If you can not believe a tiling", "'check_my_tiling'.\")) if ENV['my_conjecture'] == \"no\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-not-tilable\", f\"You are perfecty", "you want to exhibit us a tiling for this grid", "('ISATTY',bool), ] ENV =Env(problem, service, args_list) TAc =TALcolors(ENV) LANG=Lang(ENV, TAc,", "if (ENV['m'] * ENV['n']) % 2 == 0: if ENV['my_conjecture']", "NOT tilable.\")) if (ENV['m'] * ENV['n']) % 2 == 0:", "{ENV['m']}x{ENV['n']}-grid is NOT tilable. If you are not convinced you", "START CODING YOUR SERVICE: assert ENV['h']==1 assert ENV['k']==2 print() if", "exit, argv from random import randrange #from TALinputs import TALinput", "('n',int), ('my_conjecture',str), ('h',int), ('k',int), ('lang',str), ('ISATTY',bool), ] ENV =Env(problem, service,", "* ENV['n']) % 2 == 1: if ENV['my_conjecture'] == \"yes\":", "a tiling for this grid you can submit it to", "the {ENV['m']}x{ENV['n']}-grid is NOT tilable.\")) if (ENV['m'] * ENV['n']) %", "to what you have asserted, the {ENV['m']}x{ENV['n']}-grid is NOT tilable.", "If you can not believe a tiling of the {ENV['m']}x{ENV['n']}-grid", "ENV['k']==2 print() if (ENV['m'] * ENV['n']) % 2 == 1:", "eval(f\"f'{fstring}'\")) TAc.print(LANG.opening_msg, \"green\") # START CODING YOUR SERVICE: assert ENV['h']==1", "tiling of the {ENV['m']}x{ENV['n']}-grid exists try the service 'gimme_hints_on_a_tiling'.\")) exit(0)", "on the fact that the {ENV['m']}x{ENV['n']}-grid is tilable. If you", "import Env, Lang, TALcolors # METADATA OF THIS TAL_SERVICE: problem=\"tiling_mxn-boards_with_1x2-boards\"", "METADATA OF THIS TAL_SERVICE: problem=\"tiling_mxn-boards_with_1x2-boards\" service=\"is_tilable\" args_list = [ ('m',int),", "\"no\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-not-tilable\", f\"You are perfecty right: the {ENV['m']}x{ENV['n']}-grid is", "'check_my_tiling'.\")) if ENV['my_conjecture'] == \"no\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-tilable\", f\"No, the {ENV['m']}x{ENV['n']}-grid", "is NOT tilable. If you are not convinced you can", "can not believe a tiling of the {ENV['m']}x{ENV['n']}-grid exists try", "If you are not convinced you can submit a tiling", "OF THIS TAL_SERVICE: problem=\"tiling_mxn-boards_with_1x2-boards\" service=\"is_tilable\" args_list = [ ('m',int), ('n',int),", "\"green\") # START CODING YOUR SERVICE: assert ENV['h']==1 assert ENV['k']==2", "\"yes\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-not-tilable\", f\"Contrary to what you have asserted, the", "us a tiling for this grid you can submit it", "2 == 1: if ENV['my_conjecture'] == \"yes\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-not-tilable\", f\"Contrary", "== \"no\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-not-tilable\", f\"You are perfecty right: the {ENV['m']}x{ENV['n']}-grid", "ENV['my_conjecture'] == \"yes\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-not-tilable\", f\"Contrary to what you have", "Env, Lang, TALcolors # METADATA OF THIS TAL_SERVICE: problem=\"tiling_mxn-boards_with_1x2-boards\" service=\"is_tilable\"", "for this grid you can submit it to the service", "NOT tilable. 
If you are not convinced you can submit", "ENV['my_conjecture'] == \"yes\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-tilable\", f\"We agree on the fact", "LANG=Lang(ENV, TAc, lambda fstring: eval(f\"f'{fstring}'\")) TAc.print(LANG.opening_msg, \"green\") # START CODING", "If you want to exhibit us a tiling for this", "('h',int), ('k',int), ('lang',str), ('ISATTY',bool), ] ENV =Env(problem, service, args_list) TAc", "have asserted, the {ENV['m']}x{ENV['n']}-grid is NOT tilable. If you are", "grid you can submit it to the service 'check_my_tiling'.\")) if", "a tiling of that grid to the service 'check_my_tiling'.\")) if", "print(LANG.render_feedback(\"TRUE-is-not-tilable\", f\"You are perfecty right: the {ENV['m']}x{ENV['n']}-grid is NOT tilable.\"))", "TAc =TALcolors(ENV) LANG=Lang(ENV, TAc, lambda fstring: eval(f\"f'{fstring}'\")) TAc.print(LANG.opening_msg, \"green\") #", "that the {ENV['m']}x{ENV['n']}-grid is tilable. If you want to exhibit", "#from TALinputs import TALinput from multilanguage import Env, Lang, TALcolors", "[ ('m',int), ('n',int), ('my_conjecture',str), ('h',int), ('k',int), ('lang',str), ('ISATTY',bool), ] ENV", "tiling for this grid you can submit it to the", "print() if (ENV['m'] * ENV['n']) % 2 == 1: if", "it to the service 'check_my_tiling'.\")) if ENV['my_conjecture'] == \"no\": TAc.NO()", "f\"No, the {ENV['m']}x{ENV['n']}-grid is tilable. If you can not believe", "CODING YOUR SERVICE: assert ENV['h']==1 assert ENV['k']==2 print() if (ENV['m']", "a tiling of the {ENV['m']}x{ENV['n']}-grid exists try the service 'gimme_hints_on_a_tiling'.\"))", "random import randrange #from TALinputs import TALinput from multilanguage import", "=TALcolors(ENV) LANG=Lang(ENV, TAc, lambda fstring: eval(f\"f'{fstring}'\")) TAc.print(LANG.opening_msg, \"green\") # START", "is tilable. If you can not believe a tiling of", "(ENV['m'] * ENV['n']) % 2 == 1: if ENV['my_conjecture'] ==", "Lang, TALcolors # METADATA OF THIS TAL_SERVICE: problem=\"tiling_mxn-boards_with_1x2-boards\" service=\"is_tilable\" args_list", "submit it to the service 'check_my_tiling'.\")) if ENV['my_conjecture'] == \"no\":", "2 == 0: if ENV['my_conjecture'] == \"yes\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-tilable\", f\"We", "not convinced you can submit a tiling of that grid", "TALinput from multilanguage import Env, Lang, TALcolors # METADATA OF", "if ENV['my_conjecture'] == \"yes\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-not-tilable\", f\"Contrary to what you", "print(LANG.render_feedback(\"TRUE-is-tilable\", f\"We agree on the fact that the {ENV['m']}x{ENV['n']}-grid is", "ENV =Env(problem, service, args_list) TAc =TALcolors(ENV) LANG=Lang(ENV, TAc, lambda fstring:", "service, args_list) TAc =TALcolors(ENV) LANG=Lang(ENV, TAc, lambda fstring: eval(f\"f'{fstring}'\")) TAc.print(LANG.opening_msg,", "fact that the {ENV['m']}x{ENV['n']}-grid is tilable. 
If you want to", "assert ENV['k']==2 print() if (ENV['m'] * ENV['n']) % 2 ==", "from sys import stderr, exit, argv from random import randrange", "python3 from sys import stderr, exit, argv from random import", "('m',int), ('n',int), ('my_conjecture',str), ('h',int), ('k',int), ('lang',str), ('ISATTY',bool), ] ENV =Env(problem,", "ENV['my_conjecture'] == \"no\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-not-tilable\", f\"You are perfecty right: the", "service=\"is_tilable\" args_list = [ ('m',int), ('n',int), ('my_conjecture',str), ('h',int), ('k',int), ('lang',str),", "you are not convinced you can submit a tiling of", "fstring: eval(f\"f'{fstring}'\")) TAc.print(LANG.opening_msg, \"green\") # START CODING YOUR SERVICE: assert", "\"no\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-tilable\", f\"No, the {ENV['m']}x{ENV['n']}-grid is tilable. If you", "if (ENV['m'] * ENV['n']) % 2 == 1: if ENV['my_conjecture']", "TAc.NO() print(LANG.render_feedback(\"FALSE-is-not-tilable\", f\"Contrary to what you have asserted, the {ENV['m']}x{ENV['n']}-grid", "assert ENV['h']==1 assert ENV['k']==2 print() if (ENV['m'] * ENV['n']) %", "== 0: if ENV['my_conjecture'] == \"yes\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-tilable\", f\"We agree", "== \"yes\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-tilable\", f\"We agree on the fact that", "agree on the fact that the {ENV['m']}x{ENV['n']}-grid is tilable. If", "import stderr, exit, argv from random import randrange #from TALinputs", "import randrange #from TALinputs import TALinput from multilanguage import Env,", "args_list = [ ('m',int), ('n',int), ('my_conjecture',str), ('h',int), ('k',int), ('lang',str), ('ISATTY',bool),", "multilanguage import Env, Lang, TALcolors # METADATA OF THIS TAL_SERVICE:", "f\"You are perfecty right: the {ENV['m']}x{ENV['n']}-grid is NOT tilable.\")) if", "you have asserted, the {ENV['m']}x{ENV['n']}-grid is NOT tilable. If you", "('k',int), ('lang',str), ('ISATTY',bool), ] ENV =Env(problem, service, args_list) TAc =TALcolors(ENV)", "tilable. If you want to exhibit us a tiling for", "ENV['my_conjecture'] == \"no\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-tilable\", f\"No, the {ENV['m']}x{ENV['n']}-grid is tilable.", "THIS TAL_SERVICE: problem=\"tiling_mxn-boards_with_1x2-boards\" service=\"is_tilable\" args_list = [ ('m',int), ('n',int), ('my_conjecture',str),", "tilable. If you can not believe a tiling of the", "] ENV =Env(problem, service, args_list) TAc =TALcolors(ENV) LANG=Lang(ENV, TAc, lambda", "the fact that the {ENV['m']}x{ENV['n']}-grid is tilable. If you want", "# METADATA OF THIS TAL_SERVICE: problem=\"tiling_mxn-boards_with_1x2-boards\" service=\"is_tilable\" args_list = [", "service 'check_my_tiling'.\")) if ENV['my_conjecture'] == \"no\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-tilable\", f\"No, the", "are not convinced you can submit a tiling of that", "ENV['n']) % 2 == 0: if ENV['my_conjecture'] == \"yes\": TAc.OK()", "= [ ('m',int), ('n',int), ('my_conjecture',str), ('h',int), ('k',int), ('lang',str), ('ISATTY',bool), ]", "ENV['n']) % 2 == 1: if ENV['my_conjecture'] == \"yes\": TAc.NO()", "if ENV['my_conjecture'] == \"no\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-not-tilable\", f\"You are perfecty right:", "can submit it to the service 'check_my_tiling'.\")) if ENV['my_conjecture'] ==", "if ENV['my_conjecture'] == \"no\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-tilable\", f\"No, the {ENV['m']}x{ENV['n']}-grid is", "asserted, the {ENV['m']}x{ENV['n']}-grid is NOT tilable. 
If you are not", "\"yes\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-tilable\", f\"We agree on the fact that the", "that grid to the service 'check_my_tiling'.\")) if ENV['my_conjecture'] == \"no\":", "TAc, lambda fstring: eval(f\"f'{fstring}'\")) TAc.print(LANG.opening_msg, \"green\") # START CODING YOUR", "service 'check_my_tiling'.\")) if ENV['my_conjecture'] == \"no\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-not-tilable\", f\"You are", "exhibit us a tiling for this grid you can submit", "sys import stderr, exit, argv from random import randrange #from", "argv from random import randrange #from TALinputs import TALinput from", "% 2 == 1: if ENV['my_conjecture'] == \"yes\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-not-tilable\",", "(ENV['m'] * ENV['n']) % 2 == 0: if ENV['my_conjecture'] ==", "f\"We agree on the fact that the {ENV['m']}x{ENV['n']}-grid is tilable.", "grid to the service 'check_my_tiling'.\")) if ENV['my_conjecture'] == \"no\": TAc.OK()", "is NOT tilable.\")) if (ENV['m'] * ENV['n']) % 2 ==", "is tilable. If you want to exhibit us a tiling", "can submit a tiling of that grid to the service", "import TALinput from multilanguage import Env, Lang, TALcolors # METADATA", "== \"yes\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-not-tilable\", f\"Contrary to what you have asserted,", "you can not believe a tiling of the {ENV['m']}x{ENV['n']}-grid exists", "to exhibit us a tiling for this grid you can", "SERVICE: assert ENV['h']==1 assert ENV['k']==2 print() if (ENV['m'] * ENV['n'])", "f\"Contrary to what you have asserted, the {ENV['m']}x{ENV['n']}-grid is NOT", "from random import randrange #from TALinputs import TALinput from multilanguage", "you can submit a tiling of that grid to the", "stderr, exit, argv from random import randrange #from TALinputs import", "ENV['h']==1 assert ENV['k']==2 print() if (ENV['m'] * ENV['n']) % 2", "what you have asserted, the {ENV['m']}x{ENV['n']}-grid is NOT tilable. If", "want to exhibit us a tiling for this grid you", "this grid you can submit it to the service 'check_my_tiling'.\"))", "tilable. If you are not convinced you can submit a", "the {ENV['m']}x{ENV['n']}-grid is tilable. If you want to exhibit us", "TAc.print(LANG.opening_msg, \"green\") # START CODING YOUR SERVICE: assert ENV['h']==1 assert", "== 1: if ENV['my_conjecture'] == \"yes\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-not-tilable\", f\"Contrary to", "* ENV['n']) % 2 == 0: if ENV['my_conjecture'] == \"yes\":", "if ENV['my_conjecture'] == \"yes\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-tilable\", f\"We agree on the", "the service 'check_my_tiling'.\")) if ENV['my_conjecture'] == \"no\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-tilable\", f\"No,", "1: if ENV['my_conjecture'] == \"yes\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-not-tilable\", f\"Contrary to what", "you can submit it to the service 'check_my_tiling'.\")) if ENV['my_conjecture']", "problem=\"tiling_mxn-boards_with_1x2-boards\" service=\"is_tilable\" args_list = [ ('m',int), ('n',int), ('my_conjecture',str), ('h',int), ('k',int),", "# START CODING YOUR SERVICE: assert ENV['h']==1 assert ENV['k']==2 print()", "convinced you can submit a tiling of that grid to", "tiling of that grid to the service 'check_my_tiling'.\")) if ENV['my_conjecture']", "lambda fstring: eval(f\"f'{fstring}'\")) TAc.print(LANG.opening_msg, \"green\") # START CODING YOUR SERVICE:", "{ENV['m']}x{ENV['n']}-grid is tilable. 
If you want to exhibit us a", "TAc.NO() print(LANG.render_feedback(\"FALSE-is-tilable\", f\"No, the {ENV['m']}x{ENV['n']}-grid is tilable. If you can", "the {ENV['m']}x{ENV['n']}-grid is tilable. If you can not believe a", "right: the {ENV['m']}x{ENV['n']}-grid is NOT tilable.\")) if (ENV['m'] * ENV['n'])", "{ENV['m']}x{ENV['n']}-grid is NOT tilable.\")) if (ENV['m'] * ENV['n']) % 2", "perfecty right: the {ENV['m']}x{ENV['n']}-grid is NOT tilable.\")) if (ENV['m'] *", "TAL_SERVICE: problem=\"tiling_mxn-boards_with_1x2-boards\" service=\"is_tilable\" args_list = [ ('m',int), ('n',int), ('my_conjecture',str), ('h',int),", "('my_conjecture',str), ('h',int), ('k',int), ('lang',str), ('ISATTY',bool), ] ENV =Env(problem, service, args_list)", "args_list) TAc =TALcolors(ENV) LANG=Lang(ENV, TAc, lambda fstring: eval(f\"f'{fstring}'\")) TAc.print(LANG.opening_msg, \"green\")", "TAc.OK() print(LANG.render_feedback(\"TRUE-is-not-tilable\", f\"You are perfecty right: the {ENV['m']}x{ENV['n']}-grid is NOT", "TALcolors # METADATA OF THIS TAL_SERVICE: problem=\"tiling_mxn-boards_with_1x2-boards\" service=\"is_tilable\" args_list =", "from multilanguage import Env, Lang, TALcolors # METADATA OF THIS", "submit a tiling of that grid to the service 'check_my_tiling'.\"))", "TAc.OK() print(LANG.render_feedback(\"TRUE-is-tilable\", f\"We agree on the fact that the {ENV['m']}x{ENV['n']}-grid", "#!/usr/bin/env python3 from sys import stderr, exit, argv from random", "randrange #from TALinputs import TALinput from multilanguage import Env, Lang,", "tilable.\")) if (ENV['m'] * ENV['n']) % 2 == 0: if", "% 2 == 0: if ENV['my_conjecture'] == \"yes\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-tilable\",", "believe a tiling of the {ENV['m']}x{ENV['n']}-grid exists try the service", "=Env(problem, service, args_list) TAc =TALcolors(ENV) LANG=Lang(ENV, TAc, lambda fstring: eval(f\"f'{fstring}'\"))", "print(LANG.render_feedback(\"FALSE-is-tilable\", f\"No, the {ENV['m']}x{ENV['n']}-grid is tilable. If you can not", "to the service 'check_my_tiling'.\")) if ENV['my_conjecture'] == \"no\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-not-tilable\",", "the service 'check_my_tiling'.\")) if ENV['my_conjecture'] == \"no\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-not-tilable\", f\"You", "are perfecty right: the {ENV['m']}x{ENV['n']}-grid is NOT tilable.\")) if (ENV['m']", "TALinputs import TALinput from multilanguage import Env, Lang, TALcolors #", "of that grid to the service 'check_my_tiling'.\")) if ENV['my_conjecture'] ==", "0: if ENV['my_conjecture'] == \"yes\": TAc.OK() print(LANG.render_feedback(\"TRUE-is-tilable\", f\"We agree on", "('lang',str), ('ISATTY',bool), ] ENV =Env(problem, service, args_list) TAc =TALcolors(ENV) LANG=Lang(ENV,", "YOUR SERVICE: assert ENV['h']==1 assert ENV['k']==2 print() if (ENV['m'] *", "== \"no\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-tilable\", f\"No, the {ENV['m']}x{ENV['n']}-grid is tilable. If", "the {ENV['m']}x{ENV['n']}-grid is NOT tilable. If you are not convinced", "print(LANG.render_feedback(\"FALSE-is-not-tilable\", f\"Contrary to what you have asserted, the {ENV['m']}x{ENV['n']}-grid is", "to the service 'check_my_tiling'.\")) if ENV['my_conjecture'] == \"no\": TAc.NO() print(LANG.render_feedback(\"FALSE-is-tilable\",", "not believe a tiling of the {ENV['m']}x{ENV['n']}-grid exists try the" ]
[ "be skipped') def test_ssh_pod(): p = set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000'])", "jina.peapods.pods import BasePod from jina.peapods.runtimes.remote.ssh import SSHRuntime from jina.proto import", "pp.status is None @pytest.mark.skip('not implemented yet') def test_ssh_mutable_pod(): p =", "'5000']) with SSHRuntime(p, kind='pod') as pp: assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY", "is None @pytest.mark.skip('not implemented yet') def test_ssh_mutable_pod(): p = set_pod_parser().parse_args(['--host',", "assert pp.status is None @pytest.mark.skip('not implemented yet') def test_flow(): f", "test_ssh_pea(): p = set_pea_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) with SSHRuntime(p, kind='pea')", "has to be skipped') def test_ssh_pod(): p = set_pod_parser().parse_args(['--host', 'pi@172.16.1.110',", "Flow from jina.parser import set_pea_parser, set_pod_parser from jina.peapods.pods import BasePod", "kind='pod') as pp: assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY assert pp.status is", "I find out how to mock ssh, this has to", "test_ssh_mutable_pod(): p = set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) p = BasePod(p)", "from jina.flow import Flow from jina.parser import set_pea_parser, set_pod_parser from", "find out how to mock ssh, this has to be", "'--timeout', '5000']) with SSHRuntime(p, kind='pea') as pp: assert pp.status.envelope.status.code ==", "jina_pb2 @pytest.mark.skip('works locally, but until I findout how to mock", "be skipped') def test_ssh_pea(): p = set_pea_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000'])", "pp: assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY assert pp.status is None @pytest.mark.skip('works", "'pi@172.16.1.110', '--timeout', '5000']) with SSHRuntime(p, kind='pod') as pp: assert pp.status.envelope.status.code", "assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY assert pp.status is None @pytest.mark.skip('works locally,", "== jina_pb2.StatusProto.READY assert pp.status is None @pytest.mark.skip('not implemented yet') def", "to mock ssh, this has to be skipped') def test_ssh_pod():", "yet') def test_ssh_mutable_pod(): p = set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) p", "from jina.proto import jina_pb2 @pytest.mark.skip('works locally, but until I findout", "import SSHRuntime from jina.proto import jina_pb2 @pytest.mark.skip('works locally, but until", "to mock ssh, this has to be skipped') def test_ssh_pea():", "mock ssh, this has to be skipped') def test_ssh_pod(): p", "implemented yet') def test_ssh_mutable_pod(): p = set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000'])", "pp.status is None @pytest.mark.skip('not implemented yet') def test_flow(): f =", "pp: assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY assert pp.status is None @pytest.mark.skip('not", "== jina_pb2.StatusProto.READY assert pp.status is None @pytest.mark.skip('works locally, but until", "def test_ssh_mutable_pod(): p = set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) p =", "import pytest from jina.enums import RemoteAccessType from jina.flow import Flow", "with SSHRuntime(p, kind='pea') as pp: assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY assert", "set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) with SSHRuntime(p, 
kind='pod') as pp: assert", "set_pea_parser, set_pod_parser from jina.peapods.pods import BasePod from jina.peapods.runtimes.remote.ssh import SSHRuntime", "BasePod(p) with SSHRuntime(p, kind='pod') as pp: assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY", "set_pea_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) with SSHRuntime(p, kind='pea') as pp: assert", "from jina.parser import set_pea_parser, set_pod_parser from jina.peapods.pods import BasePod from", "@pytest.mark.skip('not implemented yet') def test_ssh_mutable_pod(): p = set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout',", "jina.parser import set_pea_parser, set_pod_parser from jina.peapods.pods import BasePod from jina.peapods.runtimes.remote.ssh", "'--timeout', '5000']) with SSHRuntime(p, kind='pod') as pp: assert pp.status.envelope.status.code ==", "to be skipped') def test_ssh_pea(): p = set_pea_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout',", "mock ssh, this has to be skipped') def test_ssh_pea(): p", "as pp: assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY assert pp.status is None", "is None @pytest.mark.skip('works locally, but until I find out how", "findout how to mock ssh, this has to be skipped')", "but until I find out how to mock ssh, this", "assert pp.status is None @pytest.mark.skip('works locally, but until I find", "def test_ssh_pod(): p = set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) with SSHRuntime(p,", "locally, but until I find out how to mock ssh,", "p = set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) p = BasePod(p) with", "until I findout how to mock ssh, this has to", "'--timeout', '5000']) p = BasePod(p) with SSHRuntime(p, kind='pod') as pp:", "SSHRuntime(p, kind='pod') as pp: assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY assert pp.status", "'5000']) with SSHRuntime(p, kind='pea') as pp: assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY", "import Flow from jina.parser import set_pea_parser, set_pod_parser from jina.peapods.pods import", "from jina.peapods.runtimes.remote.ssh import SSHRuntime from jina.proto import jina_pb2 @pytest.mark.skip('works locally,", "'pi@172.16.1.110', '--timeout', '5000']) with SSHRuntime(p, kind='pea') as pp: assert pp.status.envelope.status.code", "implemented yet') def test_flow(): f = Flow().add().add(host='pi@172.16.1.110', remote_access=RemoteAccessType.SSH) with f:", "jina.proto import jina_pb2 @pytest.mark.skip('works locally, but until I findout how", "locally, but until I findout how to mock ssh, this", "@pytest.mark.skip('works locally, but until I findout how to mock ssh,", "ssh, this has to be skipped') def test_ssh_pod(): p =", "jina_pb2.StatusProto.READY assert pp.status is None @pytest.mark.skip('not implemented yet') def test_flow():", "yet') def test_flow(): f = Flow().add().add(host='pi@172.16.1.110', remote_access=RemoteAccessType.SSH) with f: pass", "from jina.enums import RemoteAccessType from jina.flow import Flow from jina.parser", "test_ssh_pod(): p = set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) with SSHRuntime(p, kind='pod')", "p = BasePod(p) with SSHRuntime(p, kind='pod') as pp: assert pp.status.envelope.status.code", "set_pod_parser from jina.peapods.pods import BasePod from jina.peapods.runtimes.remote.ssh import SSHRuntime from", "jina_pb2.StatusProto.READY assert pp.status is None @pytest.mark.skip('works locally, 
but until I", "with SSHRuntime(p, kind='pod') as pp: assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY assert", "pp.status.envelope.status.code == jina_pb2.StatusProto.READY assert pp.status is None @pytest.mark.skip('works locally, but", "SSHRuntime(p, kind='pea') as pp: assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY assert pp.status", "'pi@172.16.1.110', '--timeout', '5000']) p = BasePod(p) with SSHRuntime(p, kind='pod') as", "I findout how to mock ssh, this has to be", "jina.peapods.runtimes.remote.ssh import SSHRuntime from jina.proto import jina_pb2 @pytest.mark.skip('works locally, but", "is None @pytest.mark.skip('not implemented yet') def test_flow(): f = Flow().add().add(host='pi@172.16.1.110',", "this has to be skipped') def test_ssh_pea(): p = set_pea_parser().parse_args(['--host',", "import RemoteAccessType from jina.flow import Flow from jina.parser import set_pea_parser,", "how to mock ssh, this has to be skipped') def", "assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY assert pp.status is None @pytest.mark.skip('not implemented", "@pytest.mark.skip('not implemented yet') def test_flow(): f = Flow().add().add(host='pi@172.16.1.110', remote_access=RemoteAccessType.SSH) with", "import BasePod from jina.peapods.runtimes.remote.ssh import SSHRuntime from jina.proto import jina_pb2", "BasePod from jina.peapods.runtimes.remote.ssh import SSHRuntime from jina.proto import jina_pb2 @pytest.mark.skip('works", "'5000']) p = BasePod(p) with SSHRuntime(p, kind='pod') as pp: assert", "import set_pea_parser, set_pod_parser from jina.peapods.pods import BasePod from jina.peapods.runtimes.remote.ssh import", "p = set_pea_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) with SSHRuntime(p, kind='pea') as", "ssh, this has to be skipped') def test_ssh_pea(): p =", "assert pp.status is None @pytest.mark.skip('not implemented yet') def test_ssh_mutable_pod(): p", "= set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) p = BasePod(p) with SSHRuntime(p,", "def test_ssh_pea(): p = set_pea_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) with SSHRuntime(p,", "from jina.peapods.pods import BasePod from jina.peapods.runtimes.remote.ssh import SSHRuntime from jina.proto", "to be skipped') def test_ssh_pod(): p = set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout',", "jina.flow import Flow from jina.parser import set_pea_parser, set_pod_parser from jina.peapods.pods", "jina_pb2.StatusProto.READY assert pp.status is None @pytest.mark.skip('not implemented yet') def test_ssh_mutable_pod():", "= set_pea_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) with SSHRuntime(p, kind='pea') as pp:", "until I find out how to mock ssh, this has", "None @pytest.mark.skip('not implemented yet') def test_ssh_mutable_pod(): p = set_pod_parser().parse_args(['--host', 'pi@172.16.1.110',", "out how to mock ssh, this has to be skipped')", "SSHRuntime from jina.proto import jina_pb2 @pytest.mark.skip('works locally, but until I", "skipped') def test_ssh_pea(): p = set_pea_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) with", "@pytest.mark.skip('works locally, but until I find out how to mock", "pytest from jina.enums import RemoteAccessType from jina.flow import Flow from", "p = set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) with SSHRuntime(p, kind='pod') as", "= BasePod(p) with SSHRuntime(p, 
kind='pod') as pp: assert pp.status.envelope.status.code ==", "set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) p = BasePod(p) with SSHRuntime(p, kind='pod')", "has to be skipped') def test_ssh_pea(): p = set_pea_parser().parse_args(['--host', 'pi@172.16.1.110',", "this has to be skipped') def test_ssh_pod(): p = set_pod_parser().parse_args(['--host',", "kind='pea') as pp: assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY assert pp.status is", "skipped') def test_ssh_pod(): p = set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) with", "= set_pod_parser().parse_args(['--host', 'pi@172.16.1.110', '--timeout', '5000']) with SSHRuntime(p, kind='pod') as pp:", "None @pytest.mark.skip('works locally, but until I find out how to", "None @pytest.mark.skip('not implemented yet') def test_flow(): f = Flow().add().add(host='pi@172.16.1.110', remote_access=RemoteAccessType.SSH)", "RemoteAccessType from jina.flow import Flow from jina.parser import set_pea_parser, set_pod_parser", "but until I findout how to mock ssh, this has", "jina.enums import RemoteAccessType from jina.flow import Flow from jina.parser import", "pp.status.envelope.status.code == jina_pb2.StatusProto.READY assert pp.status is None @pytest.mark.skip('not implemented yet')", "pp.status is None @pytest.mark.skip('works locally, but until I find out", "import jina_pb2 @pytest.mark.skip('works locally, but until I findout how to" ]
[ "@dataclass class PayloadSender: phone: int name: str @dataclass class PayloadBaseModel:", "PayloadSender: phone: int name: str @dataclass class PayloadBaseModel: sender: PayloadSender", "dataclass @dataclass class PayloadSender: phone: int name: str @dataclass class", "dataclasses import dataclass @dataclass class PayloadSender: phone: int name: str", "from dataclasses import dataclass @dataclass class PayloadSender: phone: int name:", "phone: int name: str @dataclass class PayloadBaseModel: sender: PayloadSender payload_id:", "int name: str @dataclass class PayloadBaseModel: sender: PayloadSender payload_id: str", "import dataclass @dataclass class PayloadSender: phone: int name: str @dataclass", "class PayloadSender: phone: int name: str @dataclass class PayloadBaseModel: sender:" ]
[ "closingIndex = closingParams.find(c) if openingIndex is not closingIndex: return False", "[] for c in expr: if c in openingParams: stack.append(c)", "if openingIndex is not closingIndex: return False if len(stack) ==", "in expr: if c in openingParams: stack.append(c) elif c in", "openingIndex = openingParams.find(topOfStack) closingIndex = closingParams.find(c) if openingIndex is not", "stack.append(c) elif c in closingParams: topOfStack = stack.pop() openingIndex =", "0: return True return False if __name__ =='__main__': print main('{(abc})')", "main(expr): openingParams = '({[' closingParams = ')}]' stack = []", "len(stack) == 0: return True return False if __name__ =='__main__':", "closingParams.find(c) if openingIndex is not closingIndex: return False if len(stack)", "openingParams = '({[' closingParams = ')}]' stack = [] for", "expr: if c in openingParams: stack.append(c) elif c in closingParams:", "'({[' closingParams = ')}]' stack = [] for c in", "openingParams: stack.append(c) elif c in closingParams: topOfStack = stack.pop() openingIndex", "for c in expr: if c in openingParams: stack.append(c) elif", "closingIndex: return False if len(stack) == 0: return True return", "= openingParams.find(topOfStack) closingIndex = closingParams.find(c) if openingIndex is not closingIndex:", "in closingParams: topOfStack = stack.pop() openingIndex = openingParams.find(topOfStack) closingIndex =", "closingParams = ')}]' stack = [] for c in expr:", "if c in openingParams: stack.append(c) elif c in closingParams: topOfStack", "stack = [] for c in expr: if c in", "topOfStack = stack.pop() openingIndex = openingParams.find(topOfStack) closingIndex = closingParams.find(c) if", "')}]' stack = [] for c in expr: if c", "c in expr: if c in openingParams: stack.append(c) elif c", "= stack.pop() openingIndex = openingParams.find(topOfStack) closingIndex = closingParams.find(c) if openingIndex", "openingParams.find(topOfStack) closingIndex = closingParams.find(c) if openingIndex is not closingIndex: return", "stack.pop() openingIndex = openingParams.find(topOfStack) closingIndex = closingParams.find(c) if openingIndex is", "if len(stack) == 0: return True return False if __name__", "not closingIndex: return False if len(stack) == 0: return True", "False if len(stack) == 0: return True return False if", "return False if len(stack) == 0: return True return False", "closingParams: topOfStack = stack.pop() openingIndex = openingParams.find(topOfStack) closingIndex = closingParams.find(c)", "= '({[' closingParams = ')}]' stack = [] for c", "== 0: return True return False if __name__ =='__main__': print", "openingIndex is not closingIndex: return False if len(stack) == 0:", "in openingParams: stack.append(c) elif c in closingParams: topOfStack = stack.pop()", "c in openingParams: stack.append(c) elif c in closingParams: topOfStack =", "= ')}]' stack = [] for c in expr: if", "= closingParams.find(c) if openingIndex is not closingIndex: return False if", "= [] for c in expr: if c in openingParams:", "elif c in closingParams: topOfStack = stack.pop() openingIndex = openingParams.find(topOfStack)", "c in closingParams: topOfStack = stack.pop() openingIndex = openingParams.find(topOfStack) closingIndex", "is not closingIndex: return False if len(stack) == 0: return", "def main(expr): openingParams = '({[' closingParams = ')}]' stack =" ]
[ "each bin \" \"to 10x Genomics datasets without realigning\", },", "int elif arg_type == \"float\": arg_type = float elif arg_type", "pipelines = [ GenomeIndexPipeline(config), MappableRegionsPipeline(config), BinsPipeline(config), ] return CompositePipeline(config, pipelines)", "== \"string\": arg_type = str elif arg_type == \"integer\": arg_type", "SGAINS_COMMANDS = { \"genomeindex\": { \"config_groups\": [\"aligner\", \"genome\"], \"help\": \"builds", "is not None, command executor = Executor(config) executor.run_pipeline(pipeline) if __name__", "\"genome\", \"mappable_regions\", \"bins\", \"sge\"], \"help\": \"combines all preparation steps ('genome',", "command in SGAINS_COMMANDS command = SGAINS_COMMANDS[command] config_groups = command[\"config_groups\"] for", ") parser.add_argument( \"--parallel\", \"-p\", dest=\"parallel\", help=\"number of task to run", "results directory\", default=False ) parser.add_argument( \"--parallel\", \"-p\", dest=\"parallel\", help=\"number of", "arg_value is not None: group_result[arg_name] = arg_value else: config_value =", "\"mapping\", \"sge\"], \"help\": \"performs mapping of cells reads to the", "\"config_groups\": [ \"data_10x\", \"reads\", \"sge\"], \"help\": \"extracts cells reads from", "SCGV input data\" }, \"process\": { \"config_groups\": [ \"aligner\", \"genome\",", "elif command == \"mapping\": return MappingPipeline(config) elif command == \"varbin\":", "return BinsPipeline(config) elif command == \"mapping\": return MappingPipeline(config) elif command", "all process steps ('mapping', 'varbin' \" \"and 'scclust') into single", "= config.config.get(group_name) if group is None: return None result =", "meta_data is not None: help_data = meta_data.get(\"help\") arg_default = _get_config_value(config,", "the reference genome\", }, \"extract_10x\": { \"config_groups\": [ \"data_10x\", \"reads\",", "to the reference genome\", }, \"extract_10x\": { \"config_groups\": [ \"data_10x\",", "None meta_data = arg_spec.get(\"meta\") if meta_data is not None: help_data", "config_value if group_result: result[group_name] = group_result config = Config.from_dict(result, work_dirname)", "arg_value = getattr(args, arg_name, None) if arg_value is not None:", "collections import defaultdict import yaml from argparse import ArgumentParser,\\ RawDescriptionHelpFormatter,", "else: raise ValueError(f\"wrong argument type {arg_type}\") help_data = None meta_data", "command == \"mappable_regions\": return MappableRegionsPipeline(config) elif command == \"bins\": return", "and not sge_flag: continue group = validator.schema.get(group_name) group_parser = argparser.add_argument_group(f\"{group_name}", "bowtie index for the \" \"reference genome\", }, \"mappable_regions\": {", "import yaml from argparse import ArgumentParser,\\ RawDescriptionHelpFormatter, ArgumentDefaultsHelpFormatter from sgains.configuration.parser", "subcommands\" ) for command in SGAINS_COMMANDS: command_name = command.replace(\"_\", \"-\")", "single command\" }, } def build_common_options(parser): parser.add_argument( \"-v\", \"--verbose\", dest=\"verbose\",", "}, \"bins\": { \"config_groups\": [\"genome\", \"mappable_regions\", \"bins\", \"sge\"], \"help\": \"calculates", "\"mapping\": { \"config_groups\": [\"aligner\", \"genome\", \"reads\", \"mapping\", \"sge\"], \"help\": \"performs", "algorithm to count read mappings in each bin\", }, \"varbin_10x\":", "= float elif arg_type == \"list\": arg_type = list else:", "[\"aligner\", \"genome\", \"reads\", \"mapping\", \"sge\"], \"help\": 
\"performs mapping of cells", "reads to the reference genome\", }, \"extract_10x\": { \"config_groups\": [", "open(args.config, \"r\") as infile: config_dict = yaml.safe_load(infile) work_dirname = os.path.dirname(args.config)", "validator.schema.get(group_name) group_schema = group.get(\"schema\") if group_schema is None: continue group_result", "not None: help_data = meta_data.get(\"help\") arg_default = _get_config_value(config, group_name, arg_name)", "arg_name, None) if arg_value is not None: group_result[arg_name] = arg_value", "sys.stderr.write(indent + \" for help use --help\") sys.stderr.write('\\n') return 2", "\"extract_10x\": return Extract10xPipeline(config) elif command == \"varbin_10x\": return Varbin10xPipeline(config) elif", "deepcopy(sgains_schema), work_dirname=work_dirname) if command is None: config_groups = list(validator.schema.keys()) else:", "config.verbose = args.verbose config.config_file = args.config config.dry_run = args.dry_run config.force", "def build_common_options(parser): parser.add_argument( \"-v\", \"--verbose\", dest=\"verbose\", action=\"count\", help=\"set verbosity level", "\"scclust\"], \"help\": \"segmentation and clustering based bin counts and \"", "\" \"and 'bins') into single command\", }, \"mapping\": { \"config_groups\":", "from sgains.pipelines.bins_pipeline import BinsPipeline from sgains.pipelines.mapping_pipeline import MappingPipeline from sgains.pipelines.extract_10x_pipeline", "group_parser.add_argument( name, help=help_data, dest=arg_name, type=arg_type, default=arg_default) return argparser def parse_cli_options(args):", "== \"scclust\": return Rpipeline(config) elif command == \"extract_10x\": return Extract10xPipeline(config)", "deepcopy(sgains_schema), work_dirname=work_dirname) result = defaultdict(dict) config_groups = list(validator.schema.keys()) for group_name", "import SgainsValidator, Config from sgains.configuration.schema import sgains_schema from sgains.executor import", "is not None: work_dirname = config.work_dirname validator = SgainsValidator( deepcopy(sgains_schema),", "== \"list\": arg_type = list else: raise ValueError(f\"wrong argument type", "[ \"aligner\", \"genome\", \"reads\", \"mapping\", \"bins\", \"varbin\", \"scclust\", \"sge\"], \"help\":", "== \"integer\": arg_type = int elif arg_type == \"float\": arg_type", "return argparser def parse_cli_options(args): config_dict = defaultdict(dict) work_dirname = os.getcwd()", "counts and \" \"preparation of the SCGV input data\" },", "run with no changes made\", default=False ) parser.add_argument( \"--force\", \"-F\",", "\"config_groups\": [\"aligner\", \"genome\"], \"help\": \"builds appropriate hisat2 or bowtie index", "dest=\"verbose\", action=\"count\", help=\"set verbosity level [default: %(default)s]\", default=0 ) parser.add_argument(", "\"and read length\", }, \"prepare\": { \"config_groups\": [ \"aligner\", \"genome\",", "group_schema.items(): name = f\"--{arg_name.replace('_', '-')}\" arg_type = str arg_type =", "os.getcwd() if config is not None: work_dirname = config.work_dirname validator", "\"data_10x\", \"bins\", \"varbin\", \"sge\"], \"help\": \"applies varbin algorithm to count", "reads from 10x Genomics datasets\", }, \"varbin\": { \"config_groups\": [\"bins\",", "BinsPipeline(config), ] return CompositePipeline(config, pipelines) elif command == \"process\": pipelines", "[\"aligner\", \"genome\"], \"help\": \"builds appropriate hisat2 or bowtie index for", "MappingPipeline(config) elif command == \"varbin\": return VarbinPipeline(config) elif 
command ==", "command == \"prepare\": pipelines = [ GenomeIndexPipeline(config), MappableRegionsPipeline(config), BinsPipeline(config), ]", "from 10x Genomics datasets\", }, \"varbin\": { \"config_groups\": [\"bins\", \"mapping\",", ") parser.add_argument( \"--force\", \"-F\", dest=\"force\", action=\"store_true\", help=\"allows overwriting nonempty results", "SGAINS_COMMANDS[command].get(\"help\", \"\") subparser = subparsers.add_parser( name=command_name, help=command_help, formatter_class=ArgumentDefaultsHelpFormatter ) build_cli_options(subparser,", "if command == \"genomeindex\": return GenomeIndexPipeline(config) elif command == \"mappable_regions\":", "= yaml.safe_load(infile) work_dirname = os.path.dirname(args.config) validator = SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname)", "GenomeIndexPipeline from sgains.pipelines.bins_pipeline import BinsPipeline from sgains.pipelines.mapping_pipeline import MappingPipeline from", "args.config config.dry_run = args.dry_run config.force = args.force config.parallel = args.parallel", "build_common_options(argparser) subparsers = argparser.add_subparsers( title=\"sGAINS subcommands\" ) for command in", "cells reads from 10x Genomics datasets\", }, \"varbin\": { \"config_groups\":", "if args.config is not None: assert os.path.exists(args.config), args.config with open(args.config,", "genome\", }, \"mappable_regions\": { \"config_groups\": [\"aligner\", \"genome\", \"mappable_regions\", \"sge\"], \"help\":", "import defaultdict import yaml from argparse import ArgumentParser,\\ RawDescriptionHelpFormatter, ArgumentDefaultsHelpFormatter", "if config_value is not None: config_value = config_value.get(arg_name, None) if", "= len(program_name) * \" \" sys.stderr.write(program_name + \": \" +", "run in parallel\", type=int, default=1 ) parser.add_argument( \"--sge\", dest=\"sge\", action=\"store_true\",", "len(program_name) * \" \" sys.stderr.write(program_name + \": \" + repr(e)", "ArgumentParser,\\ RawDescriptionHelpFormatter, ArgumentDefaultsHelpFormatter from sgains.configuration.parser import SgainsValidator, Config from sgains.configuration.schema", "\"config_groups\": [\"aligner\", \"genome\", \"reads\", \"mapping\", \"sge\"], \"help\": \"performs mapping of", "args.force config.parallel = args.parallel config.sge = args.sge return config def", "config_value is not None: config_value = config_value.get(arg_name, None) if config_value", "Rpipeline(config) elif command == \"extract_10x\": return Extract10xPipeline(config) elif command ==", "command == \"extract_10x\": return Extract10xPipeline(config) elif command == \"varbin_10x\": return", "config_groups = list(validator.schema.keys()) for group_name in config_groups: if group_name ==", "os import sys from copy import deepcopy import traceback import", "not None: assert os.path.exists(args.config), args.config with open(args.config, \"r\") as infile:", "create_pipeline(command, config) assert pipeline is not None, command executor =", "sgains.configuration.parser import SgainsValidator, Config from sgains.configuration.schema import sgains_schema from sgains.executor", "= [ MappingPipeline(config), VarbinPipeline(config), Rpipeline(config), ] return CompositePipeline(config, pipelines) raise", "}, } def build_common_options(parser): parser.add_argument( \"-v\", \"--verbose\", dest=\"verbose\", action=\"count\", help=\"set", "name=command_name, help=command_help, formatter_class=ArgumentDefaultsHelpFormatter ) build_cli_options(subparser, command, config, 
sge_flag=sge_flag) subparser.set_defaults(func=functools.partial(execute, command))", "command\", }, \"mapping\": { \"config_groups\": [\"aligner\", \"genome\", \"reads\", \"mapping\", \"sge\"],", "help=\"configuration file\", metavar=\"path\" ) parser.add_argument( \"-n\", \"--dry-run\", dest=\"dry_run\", action=\"store_true\", help=\"perform", "build_common_options(parser): parser.add_argument( \"-v\", \"--verbose\", dest=\"verbose\", action=\"count\", help=\"set verbosity level [default:", "yaml from argparse import ArgumentParser,\\ RawDescriptionHelpFormatter, ArgumentDefaultsHelpFormatter from sgains.configuration.parser import", "each bin\", }, \"varbin_10x\": { \"config_groups\": [ \"data_10x\", \"bins\", \"varbin\",", "elif arg_type == \"integer\": arg_type = int elif arg_type ==", "sgains.pipelines.extract_10x_pipeline import Extract10xPipeline from sgains.pipelines.varbin_10x_pipeline import Varbin10xPipeline from sgains.pipelines.varbin_pipeline import", "yaml.safe_load(infile) work_dirname = os.path.dirname(args.config) validator = SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) result", "config_groups: if group_name == \"sge\" and not args.sge: continue group", "= \\ 'sgains - sparse genomic analysis of individual nuclei", "sge_flag=False): work_dirname = os.getcwd() if config is not None: work_dirname", "assert os.path.exists(args.config), args.config with open(args.config, \"r\") as infile: config_dict =", "of the SCGV input data\" }, \"process\": { \"config_groups\": [", "== \"genomeindex\": return GenomeIndexPipeline(config) elif command == \"mappable_regions\": return MappableRegionsPipeline(config)", "}, \"varbin\": { \"config_groups\": [\"bins\", \"mapping\", \"varbin\", \"sge\"], \"help\": \"applies", "by ' \\ 'sequencing pipeline' program_description = '''%s USAGE '''", "e: traceback.print_exc() indent = len(program_name) * \" \" sys.stderr.write(program_name +", "copy import deepcopy import traceback import functools from collections import", "from sgains.pipelines.varbin_10x_pipeline import Varbin10xPipeline from sgains.pipelines.varbin_pipeline import VarbinPipeline from sgains.pipelines.r_pipeline", "%(default)s]\", default=0 ) parser.add_argument( \"-c\", \"--config\", dest=\"config\", help=\"configuration file\", metavar=\"path\"", "not None: config_value = config_value.get(arg_name, None) if config_value is not", "is not None: help_data = meta_data.get(\"help\") arg_default = _get_config_value(config, group_name,", "help=\"perform a trial run with no changes made\", default=False )", "assert pipeline is not None, command executor = Executor(config) executor.run_pipeline(pipeline)", "work_dirname) config.verbose = args.verbose config.config_file = args.config config.dry_run = args.dry_run", "Extract10xPipeline from sgains.pipelines.varbin_10x_pipeline import Varbin10xPipeline from sgains.pipelines.varbin_pipeline import VarbinPipeline from", "command == \"varbin_10x\": return Varbin10xPipeline(config) elif command == \"prepare\": pipelines", "group_result: result[group_name] = group_result config = Config.from_dict(result, work_dirname) config.verbose =", "build_cli_options(subparser, command, config, sge_flag=sge_flag) subparser.set_defaults(func=functools.partial(execute, command)) args = argparser.parse_args(argv) args.func(args)", "command: {command}\") def execute(command, args): config = parse_cli_options(args) pipeline =", "}, \"varbin_10x\": { \"config_groups\": [ \"data_10x\", \"bins\", \"varbin\", \"sge\"], \"help\":", "datasets 
without realigning\", }, \"scclust\": { \"config_groups\": [\"bins\", \"varbin\", \"scclust\"],", "\"string\") if arg_type == \"string\": arg_type = str elif arg_type", "dest=arg_name, type=arg_type, default=arg_default) return argparser def parse_cli_options(args): config_dict = defaultdict(dict)", "group_name == \"sge\" and not args.sge: continue group = validator.schema.get(group_name)", "Rpipeline(config), ] return CompositePipeline(config, pipelines) raise ValueError(f\"Unexpected command: {command}\") def", "group_name in config_groups: if group_name == \"sge\" and not args.sge:", "help=\"set verbosity level [default: %(default)s]\", default=0 ) parser.add_argument( \"-c\", \"--config\",", "with no changes made\", default=False ) parser.add_argument( \"--force\", \"-F\", dest=\"force\",", "argument type {arg_type}\") help_data = None meta_data = arg_spec.get(\"meta\") if", "{ \"config_groups\": [\"aligner\", \"genome\", \"reads\", \"mapping\", \"sge\"], \"help\": \"performs mapping", "'-')}\" arg_type = str arg_type = arg_spec.get(\"type\", \"string\") if arg_type", "'bins') into single command\", }, \"mapping\": { \"config_groups\": [\"aligner\", \"genome\",", "\"config_groups\": [ \"aligner\", \"genome\", \"mappable_regions\", \"bins\", \"sge\"], \"help\": \"combines all", "\"mapping\", \"bins\", \"varbin\", \"scclust\", \"sge\"], \"help\": \"combines all process steps", "None, command executor = Executor(config) executor.run_pipeline(pipeline) if __name__ == \"__main__\":", "MappableRegionsPipeline(config) elif command == \"bins\": return BinsPipeline(config) elif command ==", "for arg_name, arg_spec in group_schema.items(): name = f\"--{arg_name.replace('_', '-')}\" arg_type", "dest=\"sge\", action=\"store_true\", help=\"parallelilizes commands using SGE cluster manager\", default=False )", "return config def main(argv=sys.argv[1:]): program_name = os.path.basename(sys.argv[0]) program_shortdesc = \\", "return Varbin10xPipeline(config) elif command == \"prepare\": pipelines = [ GenomeIndexPipeline(config),", "\"reads\", \"sge\"], \"help\": \"extracts cells reads from 10x Genomics datasets\",", "made\", default=False ) parser.add_argument( \"--force\", \"-F\", dest=\"force\", action=\"store_true\", help=\"allows overwriting", "= { \"genomeindex\": { \"config_groups\": [\"aligner\", \"genome\"], \"help\": \"builds appropriate", "command)) args = argparser.parse_args(argv) args.func(args) except KeyboardInterrupt: traceback.print_exc() return 0", "elif command == \"mappable_regions\": return MappableRegionsPipeline(config) elif command == \"bins\":", "\"mappable_regions\": return MappableRegionsPipeline(config) elif command == \"bins\": return BinsPipeline(config) elif", "= list(validator.schema.keys()) else: assert command in SGAINS_COMMANDS command = SGAINS_COMMANDS[command]", "sys.stderr.write(program_name + \": \" + repr(e) + \"\\n\") sys.stderr.write(indent +", "continue group = validator.schema.get(group_name) group_parser = argparser.add_argument_group(f\"{group_name} group:\") assert group[\"type\"]", "\"-n\", \"--dry-run\", dest=\"dry_run\", action=\"store_true\", help=\"perform a trial run with no", "= ArgumentParser( description=program_description, formatter_class=ArgumentDefaultsHelpFormatter) build_common_options(argparser) subparsers = argparser.add_subparsers( title=\"sGAINS subcommands\"", "indent = len(program_name) * \" \" sys.stderr.write(program_name + \": \"", "default=False ) parser.add_argument( \"--parallel\", \"-p\", dest=\"parallel\", help=\"number of task 
to", "from sgains.configuration.parser import SgainsValidator, Config from sgains.configuration.schema import sgains_schema from", "_get_config_value(config, group_name, name): if config is None: return None group", "\"genome\", \"reads\", \"mapping\", \"sge\"], \"help\": \"performs mapping of cells reads", "return None result = getattr(group, name) return result def build_cli_options(argparser,", "commands using SGE cluster manager\", default=False ) def _get_config_value(config, group_name,", "use --help\") sys.stderr.write('\\n') return 2 def create_pipeline(command, config): if command", "defaultdict(dict) work_dirname = os.getcwd() if args.config is not None: assert", "\"mappable_regions\", \"bins\", \"sge\"], \"help\": \"calculates all bins boundaries for specified", "command_name = command.replace(\"_\", \"-\") command_help = SGAINS_COMMANDS[command].get(\"help\", \"\") subparser =", "into single command\" }, } def build_common_options(parser): parser.add_argument( \"-v\", \"--verbose\",", "sgains.pipelines.genomeindex_pipeline import GenomeIndexPipeline from sgains.pipelines.bins_pipeline import BinsPipeline from sgains.pipelines.mapping_pipeline import", "Rpipeline from sgains.pipelines.composite_pipeline import CompositePipeline SGAINS_COMMANDS = { \"genomeindex\": {", ") def _get_config_value(config, group_name, name): if config is None: return", "= arg_value else: config_value = config_dict.get(group_name, None) if config_value is", "\"--config\", dest=\"config\", help=\"configuration file\", metavar=\"path\" ) parser.add_argument( \"-n\", \"--dry-run\", dest=\"dry_run\",", "a trial run with no changes made\", default=False ) parser.add_argument(", "command\" }, } def build_common_options(parser): parser.add_argument( \"-v\", \"--verbose\", dest=\"verbose\", action=\"count\",", "meta_data = arg_spec.get(\"meta\") if meta_data is not None: help_data =", "for help use --help\") sys.stderr.write('\\n') return 2 def create_pipeline(command, config):", "def _get_config_value(config, group_name, name): if config is None: return None", "help_data = meta_data.get(\"help\") arg_default = _get_config_value(config, group_name, arg_name) if arg_default", "VarbinPipeline(config), Rpipeline(config), ] return CompositePipeline(config, pipelines) raise ValueError(f\"Unexpected command: {command}\")", "traceback.print_exc() indent = len(program_name) * \" \" sys.stderr.write(program_name + \":", "individual nuclei by ' \\ 'sequencing pipeline' program_description = '''%s", "Varbin10xPipeline from sgains.pipelines.varbin_pipeline import VarbinPipeline from sgains.pipelines.r_pipeline import Rpipeline from", "= config_dict.get(group_name, None) if config_value is not None: config_value =", "from sgains.pipelines.varbin_pipeline import VarbinPipeline from sgains.pipelines.r_pipeline import Rpipeline from sgains.pipelines.composite_pipeline", "[\"genome\", \"mappable_regions\", \"bins\", \"sge\"], \"help\": \"calculates all bins boundaries for", "pipelines) elif command == \"process\": pipelines = [ MappingPipeline(config), VarbinPipeline(config),", "\"genome\", \"mappable_regions\", \"sge\"], \"help\": \"finds all mappable regions in specified", "if config_value is not None: group_result[arg_name] = config_value if group_result:", "USAGE ''' % (program_shortdesc, ) try: config = Config.parse_argv(argv) sge_flag", "subparser = subparsers.add_parser( name=command_name, help=command_help, formatter_class=ArgumentDefaultsHelpFormatter ) build_cli_options(subparser, command, config,", "using SGE 
cluster manager\", default=False ) def _get_config_value(config, group_name, name):", "import deepcopy import traceback import functools from collections import defaultdict", "= Config.check_sge_argv(argv) argparser = ArgumentParser( description=program_description, formatter_class=ArgumentDefaultsHelpFormatter) build_common_options(argparser) subparsers =", "\"help\": \"extracts cells reads from 10x Genomics datasets\", }, \"varbin\":", "metavar=\"path\" ) parser.add_argument( \"-n\", \"--dry-run\", dest=\"dry_run\", action=\"store_true\", help=\"perform a trial", "os.path.exists(args.config), args.config with open(args.config, \"r\") as infile: config_dict = yaml.safe_load(infile)", "of cells reads to the reference genome\", }, \"extract_10x\": {", "}, \"extract_10x\": { \"config_groups\": [ \"data_10x\", \"reads\", \"sge\"], \"help\": \"extracts", "command in SGAINS_COMMANDS: command_name = command.replace(\"_\", \"-\") command_help = SGAINS_COMMANDS[command].get(\"help\",", "group_schema is None: continue group_result = {} for arg_name in", "\"mapping\", \"varbin\", \"sge\"], \"help\": \"applies varbin algorithm to count read", "group_name == \"sge\" and not sge_flag: continue group = validator.schema.get(group_name)", "= _get_config_value(config, group_name, arg_name) if arg_default is None: arg_default =", "work_dirname=work_dirname) if command is None: config_groups = list(validator.schema.keys()) else: assert", "== \"extract_10x\": return Extract10xPipeline(config) elif command == \"varbin_10x\": return Varbin10xPipeline(config)", "config_value = config_dict.get(group_name, None) if config_value is not None: config_value", "from collections import defaultdict import yaml from argparse import ArgumentParser,\\", "import Rpipeline from sgains.pipelines.composite_pipeline import CompositePipeline SGAINS_COMMANDS = { \"genomeindex\":", "dest=\"force\", action=\"store_true\", help=\"allows overwriting nonempty results directory\", default=False ) parser.add_argument(", "in config_groups: if group_name == \"sge\" and not sge_flag: continue", "Config.check_sge_argv(argv) argparser = ArgumentParser( description=program_description, formatter_class=ArgumentDefaultsHelpFormatter) build_common_options(argparser) subparsers = argparser.add_subparsers(", "from sgains.pipelines.mapping_pipeline import MappingPipeline from sgains.pipelines.extract_10x_pipeline import Extract10xPipeline from sgains.pipelines.varbin_10x_pipeline", "program_name = os.path.basename(sys.argv[0]) program_shortdesc = \\ 'sgains - sparse genomic", "mappings in each bin\", }, \"varbin_10x\": { \"config_groups\": [ \"data_10x\",", "== \"dict\", (group_name, group) group_schema = group[\"schema\"] for arg_name, arg_spec", "import functools from collections import defaultdict import yaml from argparse", "= SGAINS_COMMANDS[command] config_groups = command[\"config_groups\"] for group_name in config_groups: if", "config.parallel = args.parallel config.sge = args.sge return config def main(argv=sys.argv[1:]):", "= str elif arg_type == \"integer\": arg_type = int elif", "import MappingPipeline from sgains.pipelines.extract_10x_pipeline import Extract10xPipeline from sgains.pipelines.varbin_10x_pipeline import Varbin10xPipeline", "== \"sge\" and not sge_flag: continue group = validator.schema.get(group_name) group_parser", "config_value.get(arg_name, None) if config_value is not None: group_result[arg_name] = config_value", "== \"varbin_10x\": return Varbin10xPipeline(config) elif command == \"prepare\": pipelines =", 
"argparser.add_argument_group(f\"{group_name} group:\") assert group[\"type\"] == \"dict\", (group_name, group) group_schema =", "steps ('genome', 'mappable-regions' \" \"and 'bins') into single command\", },", ") parser.add_argument( \"-n\", \"--dry-run\", dest=\"dry_run\", action=\"store_true\", help=\"perform a trial run", "from sgains.pipelines.genomeindex_pipeline import GenomeIndexPipeline from sgains.pipelines.bins_pipeline import BinsPipeline from sgains.pipelines.mapping_pipeline", "varbin algorithm to count read mappings in each bin\", },", "bin \" \"to 10x Genomics datasets without realigning\", }, \"scclust\":", "in group_schema.items(): name = f\"--{arg_name.replace('_', '-')}\" arg_type = str arg_type", "= int elif arg_type == \"float\": arg_type = float elif", "os.getcwd() if args.config is not None: assert os.path.exists(args.config), args.config with", "config = Config.parse_argv(argv) sge_flag = Config.check_sge_argv(argv) argparser = ArgumentParser( description=program_description,", "\"extracts cells reads from 10x Genomics datasets\", }, \"varbin\": {", "2 def create_pipeline(command, config): if command == \"genomeindex\": return GenomeIndexPipeline(config)", "config is not None: work_dirname = config.work_dirname validator = SgainsValidator(", "into single command\", }, \"mapping\": { \"config_groups\": [\"aligner\", \"genome\", \"reads\",", "None) if config_value is not None: config_value = config_value.get(arg_name, None)", "import sgains_schema from sgains.executor import Executor from sgains.pipelines.mappableregions_pipeline import MappableRegionsPipeline", "\"bins\", \"varbin\", \"scclust\", \"sge\"], \"help\": \"combines all process steps ('mapping',", "config_value = config_value.get(arg_name, None) if config_value is not None: group_result[arg_name]", "count \" \"and read length\", }, \"prepare\": { \"config_groups\": [", "config = Config.from_dict(result, work_dirname) config.verbose = args.verbose config.config_file = args.config", "work_dirname = config.work_dirname validator = SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) if command", "list else: raise ValueError(f\"wrong argument type {arg_type}\") help_data = None", "in SGAINS_COMMANDS: command_name = command.replace(\"_\", \"-\") command_help = SGAINS_COMMANDS[command].get(\"help\", \"\")", "\"\\n\") sys.stderr.write(indent + \" for help use --help\") sys.stderr.write('\\n') return", "command, config, sge_flag=sge_flag) subparser.set_defaults(func=functools.partial(execute, command)) args = argparser.parse_args(argv) args.func(args) except", "default=arg_default) return argparser def parse_cli_options(args): config_dict = defaultdict(dict) work_dirname =", "import VarbinPipeline from sgains.pipelines.r_pipeline import Rpipeline from sgains.pipelines.composite_pipeline import CompositePipeline", "\" \"to 10x Genomics datasets without realigning\", }, \"scclust\": {", "\"segmentation and clustering based bin counts and \" \"preparation of", "subparsers.add_parser( name=command_name, help=command_help, formatter_class=ArgumentDefaultsHelpFormatter ) build_cli_options(subparser, command, config, sge_flag=sge_flag) subparser.set_defaults(func=functools.partial(execute,", "\"-c\", \"--config\", dest=\"config\", help=\"configuration file\", metavar=\"path\" ) parser.add_argument( \"-n\", \"--dry-run\",", "command = SGAINS_COMMANDS[command] config_groups = command[\"config_groups\"] for group_name in config_groups:", "Config.parse_argv(argv) sge_flag = Config.check_sge_argv(argv) 
argparser = ArgumentParser( description=program_description, formatter_class=ArgumentDefaultsHelpFormatter) build_common_options(argparser)", "= [ GenomeIndexPipeline(config), MappableRegionsPipeline(config), BinsPipeline(config), ] return CompositePipeline(config, pipelines) elif", "arg_type = str elif arg_type == \"integer\": arg_type = int", "group_parser = argparser.add_argument_group(f\"{group_name} group:\") assert group[\"type\"] == \"dict\", (group_name, group)", "deepcopy import traceback import functools from collections import defaultdict import", "datasets\", }, \"varbin\": { \"config_groups\": [\"bins\", \"mapping\", \"varbin\", \"sge\"], \"help\":", "mappings in each bin \" \"to 10x Genomics datasets without", "'sequencing pipeline' program_description = '''%s USAGE ''' % (program_shortdesc, )", "command == \"bins\": return BinsPipeline(config) elif command == \"mapping\": return", "program_shortdesc = \\ 'sgains - sparse genomic analysis of individual", "ValueError(f\"Unexpected command: {command}\") def execute(command, args): config = parse_cli_options(args) pipeline", "if config is not None: work_dirname = config.work_dirname validator =", "work_dirname = os.path.dirname(args.config) validator = SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) result =", "command == \"varbin\": return VarbinPipeline(config) elif command == \"scclust\": return", "execute(command, args): config = parse_cli_options(args) pipeline = create_pipeline(command, config) assert", "= meta_data.get(\"help\") arg_default = _get_config_value(config, group_name, arg_name) if arg_default is", "all preparation steps ('genome', 'mappable-regions' \" \"and 'bins') into single", "config=None, sge_flag=False): work_dirname = os.getcwd() if config is not None:", "\"dict\", (group_name, group) group_schema = group[\"schema\"] for arg_name, arg_spec in", "config.config_file = args.config config.dry_run = args.dry_run config.force = args.force config.parallel", "[ \"data_10x\", \"bins\", \"varbin\", \"sge\"], \"help\": \"applies varbin algorithm to", "not None: work_dirname = config.work_dirname validator = SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname)", "None: return None result = getattr(group, name) return result def", "str elif arg_type == \"integer\": arg_type = int elif arg_type", "] return CompositePipeline(config, pipelines) raise ValueError(f\"Unexpected command: {command}\") def execute(command,", "getattr(group, name) return result def build_cli_options(argparser, command=None, config=None, sge_flag=False): work_dirname", "{} for arg_name in group_schema.keys(): arg_value = getattr(args, arg_name, None)", "manager\", default=False ) def _get_config_value(config, group_name, name): if config is", "main(argv=sys.argv[1:]): program_name = os.path.basename(sys.argv[0]) program_shortdesc = \\ 'sgains - sparse", "{command}\") def execute(command, args): config = parse_cli_options(args) pipeline = create_pipeline(command,", "{ \"config_groups\": [ \"aligner\", \"genome\", \"mappable_regions\", \"bins\", \"sge\"], \"help\": \"combines", "\"config_groups\": [\"aligner\", \"genome\", \"mappable_regions\", \"sge\"], \"help\": \"finds all mappable regions", "= group[\"schema\"] for arg_name, arg_spec in group_schema.items(): name = f\"--{arg_name.replace('_',", "list(validator.schema.keys()) for group_name in config_groups: if group_name == \"sge\" and", "= os.path.basename(sys.argv[0]) program_shortdesc = \\ 'sgains - sparse genomic analysis", "specified genome\", }, 
\"bins\": { \"config_groups\": [\"genome\", \"mappable_regions\", \"bins\", \"sge\"],", "\"bins\": { \"config_groups\": [\"genome\", \"mappable_regions\", \"bins\", \"sge\"], \"help\": \"calculates all", "= SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) result = defaultdict(dict) config_groups = list(validator.schema.keys())", "None) if config_value is not None: group_result[arg_name] = config_value if", "from sgains.pipelines.composite_pipeline import CompositePipeline SGAINS_COMMANDS = { \"genomeindex\": { \"config_groups\":", "== \"float\": arg_type = float elif arg_type == \"list\": arg_type", "sge_flag=sge_flag) subparser.set_defaults(func=functools.partial(execute, command)) args = argparser.parse_args(argv) args.func(args) except KeyboardInterrupt: traceback.print_exc()", "reference genome\", }, \"extract_10x\": { \"config_groups\": [ \"data_10x\", \"reads\", \"sge\"],", "clustering based bin counts and \" \"preparation of the SCGV", "infile: config_dict = yaml.safe_load(infile) work_dirname = os.path.dirname(args.config) validator = SgainsValidator(", "SGE cluster manager\", default=False ) def _get_config_value(config, group_name, name): if", "\"varbin\": return VarbinPipeline(config) elif command == \"scclust\": return Rpipeline(config) elif", "realigning\", }, \"scclust\": { \"config_groups\": [\"bins\", \"varbin\", \"scclust\"], \"help\": \"segmentation", "help=help_data, dest=arg_name, type=arg_type, default=arg_default) return argparser def parse_cli_options(args): config_dict =", "return Rpipeline(config) elif command == \"extract_10x\": return Extract10xPipeline(config) elif command", "process steps ('mapping', 'varbin' \" \"and 'scclust') into single command\"", "args.config with open(args.config, \"r\") as infile: config_dict = yaml.safe_load(infile) work_dirname", "+ \" for help use --help\") sys.stderr.write('\\n') return 2 def", "\"string\": arg_type = str elif arg_type == \"integer\": arg_type =", "\"varbin_10x\": return Varbin10xPipeline(config) elif command == \"prepare\": pipelines = [", "CompositePipeline(config, pipelines) raise ValueError(f\"Unexpected command: {command}\") def execute(command, args): config", "KeyboardInterrupt: traceback.print_exc() return 0 except Exception as e: traceback.print_exc() indent", "pipelines) raise ValueError(f\"Unexpected command: {command}\") def execute(command, args): config =", "if group_schema is None: continue group_result = {} for arg_name", "if meta_data is not None: help_data = meta_data.get(\"help\") arg_default =", "args.sge: continue group = validator.schema.get(group_name) group_schema = group.get(\"schema\") if group_schema", "\"performs mapping of cells reads to the reference genome\", },", "\" + repr(e) + \"\\n\") sys.stderr.write(indent + \" for help", "no changes made\", default=False ) parser.add_argument( \"--force\", \"-F\", dest=\"force\", action=\"store_true\",", "parse_cli_options(args): config_dict = defaultdict(dict) work_dirname = os.getcwd() if args.config is", "return GenomeIndexPipeline(config) elif command == \"mappable_regions\": return MappableRegionsPipeline(config) elif command", ") try: config = Config.parse_argv(argv) sge_flag = Config.check_sge_argv(argv) argparser =", "not None, command executor = Executor(config) executor.run_pipeline(pipeline) if __name__ ==", "boundaries for specified bins count \" \"and read length\", },", "group_result[arg_name] = arg_value else: config_value = config_dict.get(group_name, None) if config_value", "\"bins\": return 
BinsPipeline(config) elif command == \"mapping\": return MappingPipeline(config) elif", "\"genomeindex\": return GenomeIndexPipeline(config) elif command == \"mappable_regions\": return MappableRegionsPipeline(config) elif", "return MappingPipeline(config) elif command == \"varbin\": return VarbinPipeline(config) elif command", "group.get(\"schema\") if group_schema is None: continue group_result = {} for", "\"varbin\", \"sge\"], \"help\": \"applies varbin algorithm to count read mappings", "read mappings in each bin\", }, \"varbin_10x\": { \"config_groups\": [", "= group_result config = Config.from_dict(result, work_dirname) config.verbose = args.verbose config.config_file", "== \"bins\": return BinsPipeline(config) elif command == \"mapping\": return MappingPipeline(config)", "the \" \"reference genome\", }, \"mappable_regions\": { \"config_groups\": [\"aligner\", \"genome\",", "\" \"preparation of the SCGV input data\" }, \"process\": {", "\"genome\", \"reads\", \"mapping\", \"bins\", \"varbin\", \"scclust\", \"sge\"], \"help\": \"combines all", "is None: return None group = config.config.get(group_name) if group is", "mappable regions in specified genome\", }, \"bins\": { \"config_groups\": [\"genome\",", "directory\", default=False ) parser.add_argument( \"--parallel\", \"-p\", dest=\"parallel\", help=\"number of task", "('mapping', 'varbin' \" \"and 'scclust') into single command\" }, }", "if arg_type == \"string\": arg_type = str elif arg_type ==", "parser.add_argument( \"-c\", \"--config\", dest=\"config\", help=\"configuration file\", metavar=\"path\" ) parser.add_argument( \"-n\",", "\"extract_10x\": { \"config_groups\": [ \"data_10x\", \"reads\", \"sge\"], \"help\": \"extracts cells", "type {arg_type}\") help_data = None meta_data = arg_spec.get(\"meta\") if meta_data", "dest=\"parallel\", help=\"number of task to run in parallel\", type=int, default=1", "default=False ) def _get_config_value(config, group_name, name): if config is None:", "{ \"config_groups\": [\"bins\", \"varbin\", \"scclust\"], \"help\": \"segmentation and clustering based", "Config.from_dict(result, work_dirname) config.verbose = args.verbose config.config_file = args.config config.dry_run =", "from sgains.configuration.schema import sgains_schema from sgains.executor import Executor from sgains.pipelines.mappableregions_pipeline", "argparser = ArgumentParser( description=program_description, formatter_class=ArgumentDefaultsHelpFormatter) build_common_options(argparser) subparsers = argparser.add_subparsers( title=\"sGAINS", "float elif arg_type == \"list\": arg_type = list else: raise", "\"finds all mappable regions in specified genome\", }, \"bins\": {", "config_dict = defaultdict(dict) work_dirname = os.getcwd() if args.config is not", "if group_result: result[group_name] = group_result config = Config.from_dict(result, work_dirname) config.verbose", "in each bin \" \"to 10x Genomics datasets without realigning\",", "arg_type == \"integer\": arg_type = int elif arg_type == \"float\":", "as e: traceback.print_exc() indent = len(program_name) * \" \" sys.stderr.write(program_name", "\"help\": \"finds all mappable regions in specified genome\", }, \"bins\":", "command=None, config=None, sge_flag=False): work_dirname = os.getcwd() if config is not", "to count read mappings in each bin\", }, \"varbin_10x\": {", "arg_spec.get(\"type\", \"string\") if arg_type == \"string\": arg_type = str elif", "\"sge\"], \"help\": \"performs mapping of cells reads to the reference", ") build_cli_options(subparser, command, 
config, sge_flag=sge_flag) subparser.set_defaults(func=functools.partial(execute, command)) args = argparser.parse_args(argv)", "defaultdict import yaml from argparse import ArgumentParser,\\ RawDescriptionHelpFormatter, ArgumentDefaultsHelpFormatter from", "MappingPipeline from sgains.pipelines.extract_10x_pipeline import Extract10xPipeline from sgains.pipelines.varbin_10x_pipeline import Varbin10xPipeline from", "\"config_groups\": [\"bins\", \"varbin\", \"scclust\"], \"help\": \"segmentation and clustering based bin", "group[\"schema\"] for arg_name, arg_spec in group_schema.items(): name = f\"--{arg_name.replace('_', '-')}\"", "'sgains - sparse genomic analysis of individual nuclei by '", "return result def build_cli_options(argparser, command=None, config=None, sge_flag=False): work_dirname = os.getcwd()", "else: assert command in SGAINS_COMMANDS command = SGAINS_COMMANDS[command] config_groups =", "def create_pipeline(command, config): if command == \"genomeindex\": return GenomeIndexPipeline(config) elif", "\"help\": \"builds appropriate hisat2 or bowtie index for the \"", "\"combines all preparation steps ('genome', 'mappable-regions' \" \"and 'bins') into", "\"mappable_regions\", \"bins\", \"sge\"], \"help\": \"combines all preparation steps ('genome', 'mappable-regions'", "default=False ) parser.add_argument( \"--force\", \"-F\", dest=\"force\", action=\"store_true\", help=\"allows overwriting nonempty", "VarbinPipeline(config) elif command == \"scclust\": return Rpipeline(config) elif command ==", "def execute(command, args): config = parse_cli_options(args) pipeline = create_pipeline(command, config)", "SGAINS_COMMANDS[command] config_groups = command[\"config_groups\"] for group_name in config_groups: if group_name", "elif command == \"scclust\": return Rpipeline(config) elif command == \"extract_10x\":", "\"process\": pipelines = [ MappingPipeline(config), VarbinPipeline(config), Rpipeline(config), ] return CompositePipeline(config,", "\"prepare\": { \"config_groups\": [ \"aligner\", \"genome\", \"mappable_regions\", \"bins\", \"sge\"], \"help\":", "without realigning\", }, \"scclust\": { \"config_groups\": [\"bins\", \"varbin\", \"scclust\"], \"help\":", "is not None: group_result[arg_name] = arg_value else: config_value = config_dict.get(group_name,", "= SGAINS_COMMANDS[command].get(\"help\", \"\") subparser = subparsers.add_parser( name=command_name, help=command_help, formatter_class=ArgumentDefaultsHelpFormatter )", "} def build_common_options(parser): parser.add_argument( \"-v\", \"--verbose\", dest=\"verbose\", action=\"count\", help=\"set verbosity", "\"--sge\", dest=\"sge\", action=\"store_true\", help=\"parallelilizes commands using SGE cluster manager\", default=False", "[ MappingPipeline(config), VarbinPipeline(config), Rpipeline(config), ] return CompositePipeline(config, pipelines) raise ValueError(f\"Unexpected", "import os import sys from copy import deepcopy import traceback", "parser.add_argument( \"-v\", \"--verbose\", dest=\"verbose\", action=\"count\", help=\"set verbosity level [default: %(default)s]\",", "raise ValueError(f\"Unexpected command: {command}\") def execute(command, args): config = parse_cli_options(args)", "= getattr(group, name) return result def build_cli_options(argparser, command=None, config=None, sge_flag=False):", "action=\"store_true\", help=\"perform a trial run with no changes made\", default=False", "is not None: assert os.path.exists(args.config), args.config with open(args.config, \"r\") as", "assert command in SGAINS_COMMANDS 
command = SGAINS_COMMANDS[command] config_groups = command[\"config_groups\"]", "{ \"config_groups\": [\"aligner\", \"genome\"], \"help\": \"builds appropriate hisat2 or bowtie", "name) return result def build_cli_options(argparser, command=None, config=None, sge_flag=False): work_dirname =", "of individual nuclei by ' \\ 'sequencing pipeline' program_description =", "\"mappable_regions\", \"sge\"], \"help\": \"finds all mappable regions in specified genome\",", "single command\", }, \"mapping\": { \"config_groups\": [\"aligner\", \"genome\", \"reads\", \"mapping\",", "continue group = validator.schema.get(group_name) group_schema = group.get(\"schema\") if group_schema is", ") for command in SGAINS_COMMANDS: command_name = command.replace(\"_\", \"-\") command_help", "\"sge\"], \"help\": \"extracts cells reads from 10x Genomics datasets\", },", "action=\"store_true\", help=\"allows overwriting nonempty results directory\", default=False ) parser.add_argument( \"--parallel\",", "length\", }, \"prepare\": { \"config_groups\": [ \"aligner\", \"genome\", \"mappable_regions\", \"bins\",", "arg_type = arg_spec.get(\"type\", \"string\") if arg_type == \"string\": arg_type =", "group_result config = Config.from_dict(result, work_dirname) config.verbose = args.verbose config.config_file =", "command[\"config_groups\"] for group_name in config_groups: if group_name == \"sge\" and", "elif arg_type == \"float\": arg_type = float elif arg_type ==", "CompositePipeline SGAINS_COMMANDS = { \"genomeindex\": { \"config_groups\": [\"aligner\", \"genome\"], \"help\":", "help_data = None meta_data = arg_spec.get(\"meta\") if meta_data is not", "Config from sgains.configuration.schema import sgains_schema from sgains.executor import Executor from", "_get_config_value(config, group_name, arg_name) if arg_default is None: arg_default = arg_spec.get(\"default\")", "argparser.parse_args(argv) args.func(args) except KeyboardInterrupt: traceback.print_exc() return 0 except Exception as", "\"preparation of the SCGV input data\" }, \"process\": { \"config_groups\":", "nonempty results directory\", default=False ) parser.add_argument( \"--parallel\", \"-p\", dest=\"parallel\", help=\"number", "meta_data.get(\"help\") arg_default = _get_config_value(config, group_name, arg_name) if arg_default is None:", "\"sge\"], \"help\": \"calculates all bins boundaries for specified bins count", "import ArgumentParser,\\ RawDescriptionHelpFormatter, ArgumentDefaultsHelpFormatter from sgains.configuration.parser import SgainsValidator, Config from", "config_value is not None: group_result[arg_name] = config_value if group_result: result[group_name]", "config is None: return None group = config.config.get(group_name) if group", "\"and 'bins') into single command\", }, \"mapping\": { \"config_groups\": [\"aligner\",", "is not None: group_result[arg_name] = config_value if group_result: result[group_name] =", "\"list\": arg_type = list else: raise ValueError(f\"wrong argument type {arg_type}\")", "\"bins\", \"varbin\", \"sge\"], \"help\": \"applies varbin algorithm to count read", "SGAINS_COMMANDS: command_name = command.replace(\"_\", \"-\") command_help = SGAINS_COMMANDS[command].get(\"help\", \"\") subparser", "{ \"config_groups\": [ \"data_10x\", \"bins\", \"varbin\", \"sge\"], \"help\": \"applies varbin", "or bowtie index for the \" \"reference genome\", }, \"mappable_regions\":", "config) assert pipeline is not None, command executor = Executor(config)", "SgainsValidator, Config from sgains.configuration.schema import 
sgains_schema from sgains.executor import Executor", "\"mapping\": return MappingPipeline(config) elif command == \"varbin\": return VarbinPipeline(config) elif", "== \"mapping\": return MappingPipeline(config) elif command == \"varbin\": return VarbinPipeline(config)", "group[\"type\"] == \"dict\", (group_name, group) group_schema = group[\"schema\"] for arg_name,", "based bin counts and \" \"preparation of the SCGV input", "for arg_name in group_schema.keys(): arg_value = getattr(args, arg_name, None) if", "sge_flag = Config.check_sge_argv(argv) argparser = ArgumentParser( description=program_description, formatter_class=ArgumentDefaultsHelpFormatter) build_common_options(argparser) subparsers", "\"help\": \"applies varbin algorithm to count read mappings in each", "= getattr(args, arg_name, None) if arg_value is not None: group_result[arg_name]", "parser.add_argument( \"--parallel\", \"-p\", dest=\"parallel\", help=\"number of task to run in", "subparser.set_defaults(func=functools.partial(execute, command)) args = argparser.parse_args(argv) args.func(args) except KeyboardInterrupt: traceback.print_exc() return", "\"help\": \"performs mapping of cells reads to the reference genome\",", "command == \"genomeindex\": return GenomeIndexPipeline(config) elif command == \"mappable_regions\": return", "except Exception as e: traceback.print_exc() indent = len(program_name) * \"", "\" \"and 'scclust') into single command\" }, } def build_common_options(parser):", "ValueError(f\"wrong argument type {arg_type}\") help_data = None meta_data = arg_spec.get(\"meta\")", "bins count \" \"and read length\", }, \"prepare\": { \"config_groups\":", "config = parse_cli_options(args) pipeline = create_pipeline(command, config) assert pipeline is", "\" \" sys.stderr.write(program_name + \": \" + repr(e) + \"\\n\")", "group_name, name): if config is None: return None group =", "arg_type = int elif arg_type == \"float\": arg_type = float", "= argparser.parse_args(argv) args.func(args) except KeyboardInterrupt: traceback.print_exc() return 0 except Exception", "\"float\": arg_type = float elif arg_type == \"list\": arg_type =", "\"sge\"], \"help\": \"finds all mappable regions in specified genome\", },", "def parse_cli_options(args): config_dict = defaultdict(dict) work_dirname = os.getcwd() if args.config", "= Config.from_dict(result, work_dirname) config.verbose = args.verbose config.config_file = args.config config.dry_run", "try: config = Config.parse_argv(argv) sge_flag = Config.check_sge_argv(argv) argparser = ArgumentParser(", "(program_shortdesc, ) try: config = Config.parse_argv(argv) sge_flag = Config.check_sge_argv(argv) argparser", "is None: return None result = getattr(group, name) return result", "sgains.pipelines.varbin_pipeline import VarbinPipeline from sgains.pipelines.r_pipeline import Rpipeline from sgains.pipelines.composite_pipeline import", "raise ValueError(f\"wrong argument type {arg_type}\") help_data = None meta_data =", "arg_default = _get_config_value(config, group_name, arg_name) if arg_default is None: arg_default", "SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) result = defaultdict(dict) config_groups = list(validator.schema.keys()) for", "{ \"genomeindex\": { \"config_groups\": [\"aligner\", \"genome\"], \"help\": \"builds appropriate hisat2", "\"to 10x Genomics datasets without realigning\", }, \"scclust\": { \"config_groups\":", "if config is None: return None group = config.config.get(group_name) if", "= Config.parse_argv(argv) sge_flag = 
Config.check_sge_argv(argv) argparser = ArgumentParser( description=program_description, formatter_class=ArgumentDefaultsHelpFormatter)", "\"help\": \"combines all process steps ('mapping', 'varbin' \" \"and 'scclust')", "arg_value else: config_value = config_dict.get(group_name, None) if config_value is not", "\" \"reference genome\", }, \"mappable_regions\": { \"config_groups\": [\"aligner\", \"genome\", \"mappable_regions\",", "[ \"aligner\", \"genome\", \"mappable_regions\", \"bins\", \"sge\"], \"help\": \"combines all preparation", "group) group_schema = group[\"schema\"] for arg_name, arg_spec in group_schema.items(): name", "in parallel\", type=int, default=1 ) parser.add_argument( \"--sge\", dest=\"sge\", action=\"store_true\", help=\"parallelilizes", "}, \"scclust\": { \"config_groups\": [\"bins\", \"varbin\", \"scclust\"], \"help\": \"segmentation and", "arg_name, arg_spec in group_schema.items(): name = f\"--{arg_name.replace('_', '-')}\" arg_type =", "traceback.print_exc() return 0 except Exception as e: traceback.print_exc() indent =", "+ repr(e) + \"\\n\") sys.stderr.write(indent + \" for help use", "is None: arg_default = arg_spec.get(\"default\") group_parser.add_argument( name, help=help_data, dest=arg_name, type=arg_type,", "elif command == \"prepare\": pipelines = [ GenomeIndexPipeline(config), MappableRegionsPipeline(config), BinsPipeline(config),", "''' % (program_shortdesc, ) try: config = Config.parse_argv(argv) sge_flag =", "{ \"config_groups\": [\"bins\", \"mapping\", \"varbin\", \"sge\"], \"help\": \"applies varbin algorithm", "input data\" }, \"process\": { \"config_groups\": [ \"aligner\", \"genome\", \"reads\",", "action=\"count\", help=\"set verbosity level [default: %(default)s]\", default=0 ) parser.add_argument( \"-c\",", "group_result[arg_name] = config_value if group_result: result[group_name] = group_result config =", "\"config_groups\": [ \"aligner\", \"genome\", \"reads\", \"mapping\", \"bins\", \"varbin\", \"scclust\", \"sge\"],", "= command[\"config_groups\"] for group_name in config_groups: if group_name == \"sge\"", "help=\"allows overwriting nonempty results directory\", default=False ) parser.add_argument( \"--parallel\", \"-p\",", "[\"bins\", \"mapping\", \"varbin\", \"sge\"], \"help\": \"applies varbin algorithm to count", "MappableRegionsPipeline(config), BinsPipeline(config), ] return CompositePipeline(config, pipelines) elif command == \"process\":", "= SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) if command is None: config_groups =", "read mappings in each bin \" \"to 10x Genomics datasets", "assert group[\"type\"] == \"dict\", (group_name, group) group_schema = group[\"schema\"] for", "args.verbose config.config_file = args.config config.dry_run = args.dry_run config.force = args.force", "name, help=help_data, dest=arg_name, type=arg_type, default=arg_default) return argparser def parse_cli_options(args): config_dict", "not args.sge: continue group = validator.schema.get(group_name) group_schema = group.get(\"schema\") if", "= f\"--{arg_name.replace('_', '-')}\" arg_type = str arg_type = arg_spec.get(\"type\", \"string\")", "help=command_help, formatter_class=ArgumentDefaultsHelpFormatter ) build_cli_options(subparser, command, config, sge_flag=sge_flag) subparser.set_defaults(func=functools.partial(execute, command)) args", "pipeline is not None, command executor = Executor(config) executor.run_pipeline(pipeline) if", "sgains.pipelines.bins_pipeline import BinsPipeline from sgains.pipelines.mapping_pipeline import 
MappingPipeline from sgains.pipelines.extract_10x_pipeline import", "None: help_data = meta_data.get(\"help\") arg_default = _get_config_value(config, group_name, arg_name) if", "= create_pipeline(command, config) assert pipeline is not None, command executor", "= config_value if group_result: result[group_name] = group_result config = Config.from_dict(result,", "\": \" + repr(e) + \"\\n\") sys.stderr.write(indent + \" for", "= argparser.add_subparsers( title=\"sGAINS subcommands\" ) for command in SGAINS_COMMANDS: command_name", "Executor from sgains.pipelines.mappableregions_pipeline import MappableRegionsPipeline from sgains.pipelines.genomeindex_pipeline import GenomeIndexPipeline from", "argparse import ArgumentParser,\\ RawDescriptionHelpFormatter, ArgumentDefaultsHelpFormatter from sgains.configuration.parser import SgainsValidator, Config", "all mappable regions in specified genome\", }, \"bins\": { \"config_groups\":", "config_dict.get(group_name, None) if config_value is not None: config_value = config_value.get(arg_name,", "\"genomeindex\": { \"config_groups\": [\"aligner\", \"genome\"], \"help\": \"builds appropriate hisat2 or", "\"varbin\": { \"config_groups\": [\"bins\", \"mapping\", \"varbin\", \"sge\"], \"help\": \"applies varbin", "args): config = parse_cli_options(args) pipeline = create_pipeline(command, config) assert pipeline", "= list else: raise ValueError(f\"wrong argument type {arg_type}\") help_data =", "ArgumentDefaultsHelpFormatter from sgains.configuration.parser import SgainsValidator, Config from sgains.configuration.schema import sgains_schema", "type=arg_type, default=arg_default) return argparser def parse_cli_options(args): config_dict = defaultdict(dict) work_dirname", "command == \"scclust\": return Rpipeline(config) elif command == \"extract_10x\": return", "SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) if command is None: config_groups = list(validator.schema.keys())", "from argparse import ArgumentParser,\\ RawDescriptionHelpFormatter, ArgumentDefaultsHelpFormatter from sgains.configuration.parser import SgainsValidator,", "\"varbin\", \"scclust\"], \"help\": \"segmentation and clustering based bin counts and", "GenomeIndexPipeline(config) elif command == \"mappable_regions\": return MappableRegionsPipeline(config) elif command ==", "index for the \" \"reference genome\", }, \"mappable_regions\": { \"config_groups\":", "\"sge\"], \"help\": \"applies varbin algorithm to count read mappings in", "None: return None group = config.config.get(group_name) if group is None:", "= arg_spec.get(\"type\", \"string\") if arg_type == \"string\": arg_type = str", "elif command == \"varbin_10x\": return Varbin10xPipeline(config) elif command == \"prepare\":", "and not args.sge: continue group = validator.schema.get(group_name) group_schema = group.get(\"schema\")", "Genomics datasets without realigning\", }, \"scclust\": { \"config_groups\": [\"bins\", \"varbin\",", "steps ('mapping', 'varbin' \" \"and 'scclust') into single command\" },", "arg_type == \"list\": arg_type = list else: raise ValueError(f\"wrong argument", "\"help\": \"combines all preparation steps ('genome', 'mappable-regions' \" \"and 'bins')", "os.path.basename(sys.argv[0]) program_shortdesc = \\ 'sgains - sparse genomic analysis of", "= command.replace(\"_\", \"-\") command_help = SGAINS_COMMANDS[command].get(\"help\", \"\") subparser = subparsers.add_parser(", "elif command == \"extract_10x\": return Extract10xPipeline(config) elif command == \"varbin_10x\":", "None group 
= config.config.get(group_name) if group is None: return None", "default=1 ) parser.add_argument( \"--sge\", dest=\"sge\", action=\"store_true\", help=\"parallelilizes commands using SGE", "Varbin10xPipeline(config) elif command == \"prepare\": pipelines = [ GenomeIndexPipeline(config), MappableRegionsPipeline(config),", "arg_default is None: arg_default = arg_spec.get(\"default\") group_parser.add_argument( name, help=help_data, dest=arg_name,", "parser.add_argument( \"-n\", \"--dry-run\", dest=\"dry_run\", action=\"store_true\", help=\"perform a trial run with", "[default: %(default)s]\", default=0 ) parser.add_argument( \"-c\", \"--config\", dest=\"config\", help=\"configuration file\",", "config, sge_flag=sge_flag) subparser.set_defaults(func=functools.partial(execute, command)) args = argparser.parse_args(argv) args.func(args) except KeyboardInterrupt:", "elif command == \"varbin\": return VarbinPipeline(config) elif command == \"scclust\":", "elif command == \"bins\": return BinsPipeline(config) elif command == \"mapping\":", "'''%s USAGE ''' % (program_shortdesc, ) try: config = Config.parse_argv(argv)", "parse_cli_options(args) pipeline = create_pipeline(command, config) assert pipeline is not None,", "--help\") sys.stderr.write('\\n') return 2 def create_pipeline(command, config): if command ==", "import CompositePipeline SGAINS_COMMANDS = { \"genomeindex\": { \"config_groups\": [\"aligner\", \"genome\"],", "* \" \" sys.stderr.write(program_name + \": \" + repr(e) +", "config.config.get(group_name) if group is None: return None result = getattr(group,", "= subparsers.add_parser( name=command_name, help=command_help, formatter_class=ArgumentDefaultsHelpFormatter ) build_cli_options(subparser, command, config, sge_flag=sge_flag)", "\"scclust\": return Rpipeline(config) elif command == \"extract_10x\": return Extract10xPipeline(config) elif", "\" sys.stderr.write(program_name + \": \" + repr(e) + \"\\n\") sys.stderr.write(indent", "group_name in config_groups: if group_name == \"sge\" and not sge_flag:", "return CompositePipeline(config, pipelines) elif command == \"process\": pipelines = [", "all bins boundaries for specified bins count \" \"and read", "for command in SGAINS_COMMANDS: command_name = command.replace(\"_\", \"-\") command_help =", "help=\"parallelilizes commands using SGE cluster manager\", default=False ) def _get_config_value(config,", "'mappable-regions' \" \"and 'bins') into single command\", }, \"mapping\": {", "import MappableRegionsPipeline from sgains.pipelines.genomeindex_pipeline import GenomeIndexPipeline from sgains.pipelines.bins_pipeline import BinsPipeline", "= group.get(\"schema\") if group_schema is None: continue group_result = {}", "command == \"mapping\": return MappingPipeline(config) elif command == \"varbin\": return", "return VarbinPipeline(config) elif command == \"scclust\": return Rpipeline(config) elif command", "mapping of cells reads to the reference genome\", }, \"extract_10x\":", "SGAINS_COMMANDS command = SGAINS_COMMANDS[command] config_groups = command[\"config_groups\"] for group_name in", "group_name, arg_name) if arg_default is None: arg_default = arg_spec.get(\"default\") group_parser.add_argument(", "arg_type = str arg_type = arg_spec.get(\"type\", \"string\") if arg_type ==", "= validator.schema.get(group_name) group_schema = group.get(\"schema\") if group_schema is None: continue", "return 0 except Exception as e: traceback.print_exc() indent = len(program_name)", "group_result = {} for arg_name in group_schema.keys(): 
arg_value = getattr(args,", "sgains.executor import Executor from sgains.pipelines.mappableregions_pipeline import MappableRegionsPipeline from sgains.pipelines.genomeindex_pipeline import", "Exception as e: traceback.print_exc() indent = len(program_name) * \" \"", "import GenomeIndexPipeline from sgains.pipelines.bins_pipeline import BinsPipeline from sgains.pipelines.mapping_pipeline import MappingPipeline", "\"and 'scclust') into single command\" }, } def build_common_options(parser): parser.add_argument(", "import Varbin10xPipeline from sgains.pipelines.varbin_pipeline import VarbinPipeline from sgains.pipelines.r_pipeline import Rpipeline", "{ \"config_groups\": [\"genome\", \"mappable_regions\", \"bins\", \"sge\"], \"help\": \"calculates all bins", "None: work_dirname = config.work_dirname validator = SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) if", "level [default: %(default)s]\", default=0 ) parser.add_argument( \"-c\", \"--config\", dest=\"config\", help=\"configuration", "command is None: config_groups = list(validator.schema.keys()) else: assert command in", "sgains.pipelines.mapping_pipeline import MappingPipeline from sgains.pipelines.extract_10x_pipeline import Extract10xPipeline from sgains.pipelines.varbin_10x_pipeline import", "build_cli_options(argparser, command=None, config=None, sge_flag=False): work_dirname = os.getcwd() if config is", "0 except Exception as e: traceback.print_exc() indent = len(program_name) *", "[ GenomeIndexPipeline(config), MappableRegionsPipeline(config), BinsPipeline(config), ] return CompositePipeline(config, pipelines) elif command", "work_dirname = os.getcwd() if config is not None: work_dirname =", "file\", metavar=\"path\" ) parser.add_argument( \"-n\", \"--dry-run\", dest=\"dry_run\", action=\"store_true\", help=\"perform a", "validator = SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) if command is None: config_groups", "' \\ 'sequencing pipeline' program_description = '''%s USAGE ''' %", "args.func(args) except KeyboardInterrupt: traceback.print_exc() return 0 except Exception as e:", "continue group_result = {} for arg_name in group_schema.keys(): arg_value =", "= args.force config.parallel = args.parallel config.sge = args.sge return config", "= validator.schema.get(group_name) group_parser = argparser.add_argument_group(f\"{group_name} group:\") assert group[\"type\"] == \"dict\",", "for group_name in config_groups: if group_name == \"sge\" and not", "\"aligner\", \"genome\", \"reads\", \"mapping\", \"bins\", \"varbin\", \"scclust\", \"sge\"], \"help\": \"combines", "not None: group_result[arg_name] = arg_value else: config_value = config_dict.get(group_name, None)", "BinsPipeline(config) elif command == \"mapping\": return MappingPipeline(config) elif command ==", "arg_spec.get(\"meta\") if meta_data is not None: help_data = meta_data.get(\"help\") arg_default", "{ \"config_groups\": [\"aligner\", \"genome\", \"mappable_regions\", \"sge\"], \"help\": \"finds all mappable", "config): if command == \"genomeindex\": return GenomeIndexPipeline(config) elif command ==", "arg_spec.get(\"default\") group_parser.add_argument( name, help=help_data, dest=arg_name, type=arg_type, default=arg_default) return argparser def", "= config_value.get(arg_name, None) if config_value is not None: group_result[arg_name] =", "10x Genomics datasets without realigning\", }, \"scclust\": { \"config_groups\": [\"bins\",", "return CompositePipeline(config, pipelines) raise ValueError(f\"Unexpected command: 
{command}\") def execute(command, args):", "in specified genome\", }, \"bins\": { \"config_groups\": [\"genome\", \"mappable_regions\", \"bins\",", "getattr(args, arg_name, None) if arg_value is not None: group_result[arg_name] =", "None: continue group_result = {} for arg_name in group_schema.keys(): arg_value", "\"sge\"], \"help\": \"combines all process steps ('mapping', 'varbin' \" \"and", "None result = getattr(group, name) return result def build_cli_options(argparser, command=None,", "\"bins\", \"sge\"], \"help\": \"combines all preparation steps ('genome', 'mappable-regions' \"", "\"builds appropriate hisat2 or bowtie index for the \" \"reference", "for the \" \"reference genome\", }, \"mappable_regions\": { \"config_groups\": [\"aligner\",", "else: config_value = config_dict.get(group_name, None) if config_value is not None:", "sys from copy import deepcopy import traceback import functools from", "list(validator.schema.keys()) else: assert command in SGAINS_COMMANDS command = SGAINS_COMMANDS[command] config_groups", "= args.dry_run config.force = args.force config.parallel = args.parallel config.sge =", "genome\", }, \"extract_10x\": { \"config_groups\": [ \"data_10x\", \"reads\", \"sge\"], \"help\":", "return None group = config.config.get(group_name) if group is None: return", "MappableRegionsPipeline from sgains.pipelines.genomeindex_pipeline import GenomeIndexPipeline from sgains.pipelines.bins_pipeline import BinsPipeline from", "('genome', 'mappable-regions' \" \"and 'bins') into single command\", }, \"mapping\":", "\"sge\" and not args.sge: continue group = validator.schema.get(group_name) group_schema =", "\"reads\", \"mapping\", \"bins\", \"varbin\", \"scclust\", \"sge\"], \"help\": \"combines all process", "name = f\"--{arg_name.replace('_', '-')}\" arg_type = str arg_type = arg_spec.get(\"type\",", "\"r\") as infile: config_dict = yaml.safe_load(infile) work_dirname = os.path.dirname(args.config) validator", "import Extract10xPipeline from sgains.pipelines.varbin_10x_pipeline import Varbin10xPipeline from sgains.pipelines.varbin_pipeline import VarbinPipeline", "== \"sge\" and not args.sge: continue group = validator.schema.get(group_name) group_schema", "== \"mappable_regions\": return MappableRegionsPipeline(config) elif command == \"bins\": return BinsPipeline(config)", "config_dict = yaml.safe_load(infile) work_dirname = os.path.dirname(args.config) validator = SgainsValidator( deepcopy(sgains_schema),", "args.parallel config.sge = args.sge return config def main(argv=sys.argv[1:]): program_name =", "specified bins count \" \"and read length\", }, \"prepare\": {", "= args.sge return config def main(argv=sys.argv[1:]): program_name = os.path.basename(sys.argv[0]) program_shortdesc", "command.replace(\"_\", \"-\") command_help = SGAINS_COMMANDS[command].get(\"help\", \"\") subparser = subparsers.add_parser( name=command_name,", "= argparser.add_argument_group(f\"{group_name} group:\") assert group[\"type\"] == \"dict\", (group_name, group) group_schema", "== \"process\": pipelines = [ MappingPipeline(config), VarbinPipeline(config), Rpipeline(config), ] return", "parallel\", type=int, default=1 ) parser.add_argument( \"--sge\", dest=\"sge\", action=\"store_true\", help=\"parallelilizes commands", "elif command == \"process\": pipelines = [ MappingPipeline(config), VarbinPipeline(config), Rpipeline(config),", "\\ 'sequencing pipeline' program_description = '''%s USAGE ''' % (program_shortdesc,", "config.work_dirname validator = SgainsValidator( 
deepcopy(sgains_schema), work_dirname=work_dirname) if command is None:", "\"help\": \"calculates all bins boundaries for specified bins count \"", "\"varbin_10x\": { \"config_groups\": [ \"data_10x\", \"bins\", \"varbin\", \"sge\"], \"help\": \"applies", "not None: group_result[arg_name] = config_value if group_result: result[group_name] = group_result", "genome\", }, \"bins\": { \"config_groups\": [\"genome\", \"mappable_regions\", \"bins\", \"sge\"], \"help\":", "group = config.config.get(group_name) if group is None: return None result", "import Executor from sgains.pipelines.mappableregions_pipeline import MappableRegionsPipeline from sgains.pipelines.genomeindex_pipeline import GenomeIndexPipeline", "of task to run in parallel\", type=int, default=1 ) parser.add_argument(", "\"sge\" and not sge_flag: continue group = validator.schema.get(group_name) group_parser =", "\"--force\", \"-F\", dest=\"force\", action=\"store_true\", help=\"allows overwriting nonempty results directory\", default=False", "validator = SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) result = defaultdict(dict) config_groups =", "result[group_name] = group_result config = Config.from_dict(result, work_dirname) config.verbose = args.verbose", "+ \": \" + repr(e) + \"\\n\") sys.stderr.write(indent + \"", "[\"aligner\", \"genome\", \"mappable_regions\", \"sge\"], \"help\": \"finds all mappable regions in", "from sgains.executor import Executor from sgains.pipelines.mappableregions_pipeline import MappableRegionsPipeline from sgains.pipelines.genomeindex_pipeline", "group is None: return None result = getattr(group, name) return", "parser.add_argument( \"--force\", \"-F\", dest=\"force\", action=\"store_true\", help=\"allows overwriting nonempty results directory\",", "changes made\", default=False ) parser.add_argument( \"--force\", \"-F\", dest=\"force\", action=\"store_true\", help=\"allows", "overwriting nonempty results directory\", default=False ) parser.add_argument( \"--parallel\", \"-p\", dest=\"parallel\",", "= list(validator.schema.keys()) for group_name in config_groups: if group_name == \"sge\"", "bin\", }, \"varbin_10x\": { \"config_groups\": [ \"data_10x\", \"bins\", \"varbin\", \"sge\"],", "description=program_description, formatter_class=ArgumentDefaultsHelpFormatter) build_common_options(argparser) subparsers = argparser.add_subparsers( title=\"sGAINS subcommands\" ) for", "10x Genomics datasets\", }, \"varbin\": { \"config_groups\": [\"bins\", \"mapping\", \"varbin\",", "except KeyboardInterrupt: traceback.print_exc() return 0 except Exception as e: traceback.print_exc()", "command executor = Executor(config) executor.run_pipeline(pipeline) if __name__ == \"__main__\": sys.exit(main())", "\"reference genome\", }, \"mappable_regions\": { \"config_groups\": [\"aligner\", \"genome\", \"mappable_regions\", \"sge\"],", "result = getattr(group, name) return result def build_cli_options(argparser, command=None, config=None,", "config_groups: if group_name == \"sge\" and not sge_flag: continue group", "create_pipeline(command, config): if command == \"genomeindex\": return GenomeIndexPipeline(config) elif command", "task to run in parallel\", type=int, default=1 ) parser.add_argument( \"--sge\",", "\"sge\"], \"help\": \"combines all preparation steps ('genome', 'mappable-regions' \" \"and", "if arg_default is None: arg_default = arg_spec.get(\"default\") group_parser.add_argument( name, help=help_data,", "\"--verbose\", dest=\"verbose\", action=\"count\", help=\"set verbosity level 
[default: %(default)s]\", default=0 )", "bins boundaries for specified bins count \" \"and read length\",", "dest=\"config\", help=\"configuration file\", metavar=\"path\" ) parser.add_argument( \"-n\", \"--dry-run\", dest=\"dry_run\", action=\"store_true\",", ") parser.add_argument( \"--sge\", dest=\"sge\", action=\"store_true\", help=\"parallelilizes commands using SGE cluster", "count read mappings in each bin \" \"to 10x Genomics", "verbosity level [default: %(default)s]\", default=0 ) parser.add_argument( \"-c\", \"--config\", dest=\"config\",", "str arg_type = arg_spec.get(\"type\", \"string\") if arg_type == \"string\": arg_type", "- sparse genomic analysis of individual nuclei by ' \\", "= args.verbose config.config_file = args.config config.dry_run = args.dry_run config.force =", "(group_name, group) group_schema = group[\"schema\"] for arg_name, arg_spec in group_schema.items():", "parser.add_argument( \"--sge\", dest=\"sge\", action=\"store_true\", help=\"parallelilizes commands using SGE cluster manager\",", "if arg_value is not None: group_result[arg_name] = arg_value else: config_value", "= os.getcwd() if config is not None: work_dirname = config.work_dirname", "\"process\": { \"config_groups\": [ \"aligner\", \"genome\", \"reads\", \"mapping\", \"bins\", \"varbin\",", "}, \"process\": { \"config_groups\": [ \"aligner\", \"genome\", \"reads\", \"mapping\", \"bins\",", "from sgains.pipelines.extract_10x_pipeline import Extract10xPipeline from sgains.pipelines.varbin_10x_pipeline import Varbin10xPipeline from sgains.pipelines.varbin_pipeline", "type=int, default=1 ) parser.add_argument( \"--sge\", dest=\"sge\", action=\"store_true\", help=\"parallelilizes commands using", "arg_name in group_schema.keys(): arg_value = getattr(args, arg_name, None) if arg_value", "is not None: config_value = config_value.get(arg_name, None) if config_value is", "\\ 'sgains - sparse genomic analysis of individual nuclei by", "with open(args.config, \"r\") as infile: config_dict = yaml.safe_load(infile) work_dirname =", "RawDescriptionHelpFormatter, ArgumentDefaultsHelpFormatter from sgains.configuration.parser import SgainsValidator, Config from sgains.configuration.schema import", "result = defaultdict(dict) config_groups = list(validator.schema.keys()) for group_name in config_groups:", "return Extract10xPipeline(config) elif command == \"varbin_10x\": return Varbin10xPipeline(config) elif command", "GenomeIndexPipeline(config), MappableRegionsPipeline(config), BinsPipeline(config), ] return CompositePipeline(config, pipelines) elif command ==", "functools from collections import defaultdict import yaml from argparse import", "arg_spec in group_schema.items(): name = f\"--{arg_name.replace('_', '-')}\" arg_type = str", "result def build_cli_options(argparser, command=None, config=None, sge_flag=False): work_dirname = os.getcwd() if", "\"config_groups\": [\"bins\", \"mapping\", \"varbin\", \"sge\"], \"help\": \"applies varbin algorithm to", "\"applies varbin algorithm to count read mappings in each bin", "None: arg_default = arg_spec.get(\"default\") group_parser.add_argument( name, help=help_data, dest=arg_name, type=arg_type, default=arg_default)", "= defaultdict(dict) work_dirname = os.getcwd() if args.config is not None:", "args.config is not None: assert os.path.exists(args.config), args.config with open(args.config, \"r\")", "group_schema = group.get(\"schema\") if group_schema is None: continue group_result =", "import BinsPipeline from sgains.pipelines.mapping_pipeline import 
MappingPipeline from sgains.pipelines.extract_10x_pipeline import Extract10xPipeline", "+ \"\\n\") sys.stderr.write(indent + \" for help use --help\") sys.stderr.write('\\n')", "repr(e) + \"\\n\") sys.stderr.write(indent + \" for help use --help\")", "read length\", }, \"prepare\": { \"config_groups\": [ \"aligner\", \"genome\", \"mappable_regions\",", "group:\") assert group[\"type\"] == \"dict\", (group_name, group) group_schema = group[\"schema\"]", "\"--parallel\", \"-p\", dest=\"parallel\", help=\"number of task to run in parallel\",", "BinsPipeline from sgains.pipelines.mapping_pipeline import MappingPipeline from sgains.pipelines.extract_10x_pipeline import Extract10xPipeline from", "sge_flag: continue group = validator.schema.get(group_name) group_parser = argparser.add_argument_group(f\"{group_name} group:\") assert", "help use --help\") sys.stderr.write('\\n') return 2 def create_pipeline(command, config): if", "if group_name == \"sge\" and not args.sge: continue group =", "CompositePipeline(config, pipelines) elif command == \"process\": pipelines = [ MappingPipeline(config),", "\"-v\", \"--verbose\", dest=\"verbose\", action=\"count\", help=\"set verbosity level [default: %(default)s]\", default=0", "os.path.dirname(args.config) validator = SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) result = defaultdict(dict) config_groups", "validator.schema.get(group_name) group_parser = argparser.add_argument_group(f\"{group_name} group:\") assert group[\"type\"] == \"dict\", (group_name,", "is None: continue group_result = {} for arg_name in group_schema.keys():", "None) if arg_value is not None: group_result[arg_name] = arg_value else:", "Genomics datasets\", }, \"varbin\": { \"config_groups\": [\"bins\", \"mapping\", \"varbin\", \"sge\"],", "= args.config config.dry_run = args.dry_run config.force = args.force config.parallel =", "cells reads to the reference genome\", }, \"extract_10x\": { \"config_groups\":", "\"reads\", \"mapping\", \"sge\"], \"help\": \"performs mapping of cells reads to", "= args.parallel config.sge = args.sge return config def main(argv=sys.argv[1:]): program_name", "[\"bins\", \"varbin\", \"scclust\"], \"help\": \"segmentation and clustering based bin counts", "\"applies varbin algorithm to count read mappings in each bin\",", "config_groups = command[\"config_groups\"] for group_name in config_groups: if group_name ==", "sparse genomic analysis of individual nuclei by ' \\ 'sequencing", "args.dry_run config.force = args.force config.parallel = args.parallel config.sge = args.sge", "ArgumentParser( description=program_description, formatter_class=ArgumentDefaultsHelpFormatter) build_common_options(argparser) subparsers = argparser.add_subparsers( title=\"sGAINS subcommands\" )", "[ \"data_10x\", \"reads\", \"sge\"], \"help\": \"extracts cells reads from 10x", "= str arg_type = arg_spec.get(\"type\", \"string\") if arg_type == \"string\":", "= {} for arg_name in group_schema.keys(): arg_value = getattr(args, arg_name,", "command_help = SGAINS_COMMANDS[command].get(\"help\", \"\") subparser = subparsers.add_parser( name=command_name, help=command_help, formatter_class=ArgumentDefaultsHelpFormatter", "in SGAINS_COMMANDS command = SGAINS_COMMANDS[command] config_groups = command[\"config_groups\"] for group_name", ") parser.add_argument( \"-c\", \"--config\", dest=\"config\", help=\"configuration file\", metavar=\"path\" ) parser.add_argument(", "genomic analysis of individual nuclei by ' \\ 'sequencing pipeline'", "\"--dry-run\", 
dest=\"dry_run\", action=\"store_true\", help=\"perform a trial run with no changes", "arg_type = float elif arg_type == \"list\": arg_type = list", "from sgains.pipelines.mappableregions_pipeline import MappableRegionsPipeline from sgains.pipelines.genomeindex_pipeline import GenomeIndexPipeline from sgains.pipelines.bins_pipeline", "\"-F\", dest=\"force\", action=\"store_true\", help=\"allows overwriting nonempty results directory\", default=False )", "group_schema.keys(): arg_value = getattr(args, arg_name, None) if arg_value is not", "is None: config_groups = list(validator.schema.keys()) else: assert command in SGAINS_COMMANDS", "hisat2 or bowtie index for the \" \"reference genome\", },", "sgains.pipelines.composite_pipeline import CompositePipeline SGAINS_COMMANDS = { \"genomeindex\": { \"config_groups\": [\"aligner\",", "\"scclust\", \"sge\"], \"help\": \"combines all process steps ('mapping', 'varbin' \"", "not sge_flag: continue group = validator.schema.get(group_name) group_parser = argparser.add_argument_group(f\"{group_name} group:\")", "for specified bins count \" \"and read length\", }, \"prepare\":", "config.sge = args.sge return config def main(argv=sys.argv[1:]): program_name = os.path.basename(sys.argv[0])", "= parse_cli_options(args) pipeline = create_pipeline(command, config) assert pipeline is not", "\"config_groups\": [\"genome\", \"mappable_regions\", \"bins\", \"sge\"], \"help\": \"calculates all bins boundaries", "formatter_class=ArgumentDefaultsHelpFormatter) build_common_options(argparser) subparsers = argparser.add_subparsers( title=\"sGAINS subcommands\" ) for command", "import sys from copy import deepcopy import traceback import functools", "= os.getcwd() if args.config is not None: assert os.path.exists(args.config), args.config", "\"\") subparser = subparsers.add_parser( name=command_name, help=command_help, formatter_class=ArgumentDefaultsHelpFormatter ) build_cli_options(subparser, command,", "varbin algorithm to count read mappings in each bin \"", "data\" }, \"process\": { \"config_groups\": [ \"aligner\", \"genome\", \"reads\", \"mapping\",", "{ \"config_groups\": [ \"data_10x\", \"reads\", \"sge\"], \"help\": \"extracts cells reads", "trial run with no changes made\", default=False ) parser.add_argument( \"--force\",", "\"bins\", \"sge\"], \"help\": \"calculates all bins boundaries for specified bins", "elif arg_type == \"list\": arg_type = list else: raise ValueError(f\"wrong", "work_dirname=work_dirname) result = defaultdict(dict) config_groups = list(validator.schema.keys()) for group_name in", "}, \"mapping\": { \"config_groups\": [\"aligner\", \"genome\", \"reads\", \"mapping\", \"sge\"], \"help\":", "work_dirname = os.getcwd() if args.config is not None: assert os.path.exists(args.config),", "help=\"number of task to run in parallel\", type=int, default=1 )", "None: group_result[arg_name] = config_value if group_result: result[group_name] = group_result config", "}, \"mappable_regions\": { \"config_groups\": [\"aligner\", \"genome\", \"mappable_regions\", \"sge\"], \"help\": \"finds", "arg_type == \"string\": arg_type = str elif arg_type == \"integer\":", "\"config_groups\": [ \"data_10x\", \"bins\", \"varbin\", \"sge\"], \"help\": \"applies varbin algorithm", "= defaultdict(dict) config_groups = list(validator.schema.keys()) for group_name in config_groups: if", "import traceback import functools from collections import defaultdict import yaml", "VarbinPipeline from sgains.pipelines.r_pipeline import Rpipeline from 
sgains.pipelines.composite_pipeline import CompositePipeline SGAINS_COMMANDS", "\"-\") command_help = SGAINS_COMMANDS[command].get(\"help\", \"\") subparser = subparsers.add_parser( name=command_name, help=command_help,", "algorithm to count read mappings in each bin \" \"to", "\"genome\"], \"help\": \"builds appropriate hisat2 or bowtie index for the", "\"data_10x\", \"reads\", \"sge\"], \"help\": \"extracts cells reads from 10x Genomics", "Extract10xPipeline(config) elif command == \"varbin_10x\": return Varbin10xPipeline(config) elif command ==", "as infile: config_dict = yaml.safe_load(infile) work_dirname = os.path.dirname(args.config) validator =", "config_groups = list(validator.schema.keys()) else: assert command in SGAINS_COMMANDS command =", "def build_cli_options(argparser, command=None, config=None, sge_flag=False): work_dirname = os.getcwd() if config", "sgains.pipelines.varbin_10x_pipeline import Varbin10xPipeline from sgains.pipelines.varbin_pipeline import VarbinPipeline from sgains.pipelines.r_pipeline import", "None: config_value = config_value.get(arg_name, None) if config_value is not None:", "args.sge return config def main(argv=sys.argv[1:]): program_name = os.path.basename(sys.argv[0]) program_shortdesc =", "dest=\"dry_run\", action=\"store_true\", help=\"perform a trial run with no changes made\",", "group = validator.schema.get(group_name) group_schema = group.get(\"schema\") if group_schema is None:", "= None meta_data = arg_spec.get(\"meta\") if meta_data is not None:", "traceback import functools from collections import defaultdict import yaml from", "] return CompositePipeline(config, pipelines) elif command == \"process\": pipelines =", "name): if config is None: return None group = config.config.get(group_name)", "argparser def parse_cli_options(args): config_dict = defaultdict(dict) work_dirname = os.getcwd() if", "'scclust') into single command\" }, } def build_common_options(parser): parser.add_argument( \"-v\",", "pipeline = create_pipeline(command, config) assert pipeline is not None, command", "def main(argv=sys.argv[1:]): program_name = os.path.basename(sys.argv[0]) program_shortdesc = \\ 'sgains -", "\"varbin\", \"scclust\", \"sge\"], \"help\": \"combines all process steps ('mapping', 'varbin'", "if group_name == \"sge\" and not sge_flag: continue group =", "arg_type == \"float\": arg_type = float elif arg_type == \"list\":", "\"integer\": arg_type = int elif arg_type == \"float\": arg_type =", "\"aligner\", \"genome\", \"mappable_regions\", \"bins\", \"sge\"], \"help\": \"combines all preparation steps", "\"prepare\": pipelines = [ GenomeIndexPipeline(config), MappableRegionsPipeline(config), BinsPipeline(config), ] return CompositePipeline(config,", "pipelines = [ MappingPipeline(config), VarbinPipeline(config), Rpipeline(config), ] return CompositePipeline(config, pipelines)", "sgains.pipelines.r_pipeline import Rpipeline from sgains.pipelines.composite_pipeline import CompositePipeline SGAINS_COMMANDS = {", "None: assert os.path.exists(args.config), args.config with open(args.config, \"r\") as infile: config_dict", "sgains.configuration.schema import sgains_schema from sgains.executor import Executor from sgains.pipelines.mappableregions_pipeline import", "== \"varbin\": return VarbinPipeline(config) elif command == \"scclust\": return Rpipeline(config)", "\"help\": \"segmentation and clustering based bin counts and \" \"preparation", "MappingPipeline(config), VarbinPipeline(config), Rpipeline(config), ] return CompositePipeline(config, pipelines) 
raise ValueError(f\"Unexpected command:", "appropriate hisat2 or bowtie index for the \" \"reference genome\",", "== \"prepare\": pipelines = [ GenomeIndexPipeline(config), MappableRegionsPipeline(config), BinsPipeline(config), ] return", "action=\"store_true\", help=\"parallelilizes commands using SGE cluster manager\", default=False ) def", "from copy import deepcopy import traceback import functools from collections", "preparation steps ('genome', 'mappable-regions' \" \"and 'bins') into single command\",", "\"-p\", dest=\"parallel\", help=\"number of task to run in parallel\", type=int,", "if command is None: config_groups = list(validator.schema.keys()) else: assert command", "to run in parallel\", type=int, default=1 ) parser.add_argument( \"--sge\", dest=\"sge\",", "command == \"process\": pipelines = [ MappingPipeline(config), VarbinPipeline(config), Rpipeline(config), ]", "sgains.pipelines.mappableregions_pipeline import MappableRegionsPipeline from sgains.pipelines.genomeindex_pipeline import GenomeIndexPipeline from sgains.pipelines.bins_pipeline import", "default=0 ) parser.add_argument( \"-c\", \"--config\", dest=\"config\", help=\"configuration file\", metavar=\"path\" )", "if group is None: return None result = getattr(group, name)", "\"scclust\": { \"config_groups\": [\"bins\", \"varbin\", \"scclust\"], \"help\": \"segmentation and clustering", "None: group_result[arg_name] = arg_value else: config_value = config_dict.get(group_name, None) if", "in config_groups: if group_name == \"sge\" and not args.sge: continue", "arg_default = arg_spec.get(\"default\") group_parser.add_argument( name, help=help_data, dest=arg_name, type=arg_type, default=arg_default) return", "\"calculates all bins boundaries for specified bins count \" \"and", "{ \"config_groups\": [ \"aligner\", \"genome\", \"reads\", \"mapping\", \"bins\", \"varbin\", \"scclust\",", "config def main(argv=sys.argv[1:]): program_name = os.path.basename(sys.argv[0]) program_shortdesc = \\ 'sgains", "argparser.add_subparsers( title=\"sGAINS subcommands\" ) for command in SGAINS_COMMANDS: command_name =", "= '''%s USAGE ''' % (program_shortdesc, ) try: config =", "config.force = args.force config.parallel = args.parallel config.sge = args.sge return", "\" \"and read length\", }, \"prepare\": { \"config_groups\": [ \"aligner\",", "in each bin\", }, \"varbin_10x\": { \"config_groups\": [ \"data_10x\", \"bins\",", "title=\"sGAINS subcommands\" ) for command in SGAINS_COMMANDS: command_name = command.replace(\"_\",", "}, \"prepare\": { \"config_groups\": [ \"aligner\", \"genome\", \"mappable_regions\", \"bins\", \"sge\"],", "from sgains.pipelines.r_pipeline import Rpipeline from sgains.pipelines.composite_pipeline import CompositePipeline SGAINS_COMMANDS =", "subparsers = argparser.add_subparsers( title=\"sGAINS subcommands\" ) for command in SGAINS_COMMANDS:", "= os.path.dirname(args.config) validator = SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) result = defaultdict(dict)", "% (program_shortdesc, ) try: config = Config.parse_argv(argv) sge_flag = Config.check_sge_argv(argv)", "\" for help use --help\") sys.stderr.write('\\n') return 2 def create_pipeline(command,", "regions in specified genome\", }, \"bins\": { \"config_groups\": [\"genome\", \"mappable_regions\",", "arg_type = list else: raise ValueError(f\"wrong argument type {arg_type}\") help_data", "arg_name) if arg_default is None: arg_default = arg_spec.get(\"default\") group_parser.add_argument( name,", "group_schema = group[\"schema\"] for 
arg_name, arg_spec in group_schema.items(): name =", "bin counts and \" \"preparation of the SCGV input data\"", "and \" \"preparation of the SCGV input data\" }, \"process\":", "'varbin' \" \"and 'scclust') into single command\" }, } def", "to count read mappings in each bin \" \"to 10x", "analysis of individual nuclei by ' \\ 'sequencing pipeline' program_description", "formatter_class=ArgumentDefaultsHelpFormatter ) build_cli_options(subparser, command, config, sge_flag=sge_flag) subparser.set_defaults(func=functools.partial(execute, command)) args =", "and clustering based bin counts and \" \"preparation of the", "cluster manager\", default=False ) def _get_config_value(config, group_name, name): if config", "nuclei by ' \\ 'sequencing pipeline' program_description = '''%s USAGE", "return MappableRegionsPipeline(config) elif command == \"bins\": return BinsPipeline(config) elif command", "sgains_schema from sgains.executor import Executor from sgains.pipelines.mappableregions_pipeline import MappableRegionsPipeline from", "\"mappable_regions\": { \"config_groups\": [\"aligner\", \"genome\", \"mappable_regions\", \"sge\"], \"help\": \"finds all", "args = argparser.parse_args(argv) args.func(args) except KeyboardInterrupt: traceback.print_exc() return 0 except", "return 2 def create_pipeline(command, config): if command == \"genomeindex\": return", "f\"--{arg_name.replace('_', '-')}\" arg_type = str arg_type = arg_spec.get(\"type\", \"string\") if", "{arg_type}\") help_data = None meta_data = arg_spec.get(\"meta\") if meta_data is", "= arg_spec.get(\"meta\") if meta_data is not None: help_data = meta_data.get(\"help\")", "pipeline' program_description = '''%s USAGE ''' % (program_shortdesc, ) try:", "= arg_spec.get(\"default\") group_parser.add_argument( name, help=help_data, dest=arg_name, type=arg_type, default=arg_default) return argparser", "\"combines all process steps ('mapping', 'varbin' \" \"and 'scclust') into", "= config.work_dirname validator = SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) if command is", "group = validator.schema.get(group_name) group_parser = argparser.add_argument_group(f\"{group_name} group:\") assert group[\"type\"] ==", "sys.stderr.write('\\n') return 2 def create_pipeline(command, config): if command == \"genomeindex\":", "count read mappings in each bin\", }, \"varbin_10x\": { \"config_groups\":", "config.dry_run = args.dry_run config.force = args.force config.parallel = args.parallel config.sge", "defaultdict(dict) config_groups = list(validator.schema.keys()) for group_name in config_groups: if group_name", "None: config_groups = list(validator.schema.keys()) else: assert command in SGAINS_COMMANDS command", "in group_schema.keys(): arg_value = getattr(args, arg_name, None) if arg_value is", "the SCGV input data\" }, \"process\": { \"config_groups\": [ \"aligner\",", "program_description = '''%s USAGE ''' % (program_shortdesc, ) try: config" ]
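main() wires every subcommand to execute() with functools.partial, so that args.func(args) dispatches to the right pipeline after parsing. Below is a minimal, self-contained sketch of that dispatch pattern; the handle() function and the hard-coded subcommand list are illustrative stand-ins, not part of sgains.

# Stand-alone illustration of the subcommand dispatch used above: each
# subparser stores a handler bound with functools.partial, and the parsed
# namespace carries it as args.func.
import functools
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter


def handle(command, args):
    # Stand-in for execute(): just report which pipeline would run.
    print(f"would run the '{command}' pipeline with parallel={args.parallel}")


parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--parallel", "-p", type=int, default=1)
subparsers = parser.add_subparsers(title="subcommands")
for command in ("mapping", "varbin", "scclust"):
    subparser = subparsers.add_parser(command.replace("_", "-"))
    subparser.set_defaults(func=functools.partial(handle, command))

args = parser.parse_args(["-p", "4", "varbin"])
args.func(args)   # -> would run the 'varbin' pipeline with parallel=4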
[ "512, 3, pad=1, stride=1) model.Relu('conv4_3', 'conv4_3') model.MaxPool('conv4_3', 'pool4', kernel=2, pad=0,", "512, 3, pad=1, stride=1) model.Relu('conv5_2', 'conv5_2') model.Conv('conv5_2', 'conv5_3', 512, 512,", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "3, pad=1, stride=1) model.Relu('conv3_2', 'conv3_2') model.Conv('conv3_2', 'conv3_3', 256, 256, 3,", "pad=1, stride=1) model.Relu('conv4_2', 'conv4_2') model.Conv('conv4_2', 'conv4_3', 512, 512, 3, pad=1,", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "agreed to in writing, software # distributed under the License", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Unless required by applicable law or agreed to in writing,", "language governing permissions and # limitations under the License. ##############################################################################", "kernel=2, pad=0, stride=2) model.StopGradient('pool2', 'pool2') model.Conv('pool2', 'conv3_1', 128, 256, 3,", "model.MaxPool('conv3_3', 'pool3', kernel=2, pad=0, stride=2) model.Conv('pool3', 'conv4_1', 256, 512, 3,", "stride=1) model.Relu('conv4_1', 'conv4_1') model.Conv('conv4_1', 'conv4_2', 512, 512, 3, pad=1, stride=1)", "'conv4_2', 512, 512, 3, pad=1, stride=1) model.Relu('conv4_2', 'conv4_2') model.Conv('conv4_2', 'conv4_3',", "stride=1) model.Relu('conv4_2', 'conv4_2') model.Conv('conv4_2', 'conv4_3', 512, 512, 3, pad=1, stride=1)", "stride=1) model.Relu('conv4_3', 'conv4_3') model.MaxPool('conv4_3', 'pool4', kernel=2, pad=0, stride=2) model.Conv('pool4', 'conv5_1',", "model.Relu('conv5_1', 'conv5_1') model.Conv('conv5_1', 'conv5_2', 512, 512, 3, pad=1, stride=1) model.Relu('conv5_2',", "from __future__ import division from __future__ import print_function from __future__", "resolution=7, sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO, spatial_scale=spatial_scale ) model.FC('pool5', 'fc6', dim_in * 7 *", "distributed under the License is distributed on an \"AS IS\"", "model.Relu('conv3_2', 'conv3_2') model.Conv('conv3_2', 'conv3_3', 256, 256, 3, pad=1, stride=1) model.Relu('conv3_3',", "'pool4', kernel=2, pad=0, stride=2) model.Conv('pool4', 'conv5_1', 512, 512, 3, pad=1,", "absolute_import from __future__ import division from __future__ import print_function from", "spatial_scale): model.RoIFeatureTransform( blob_in, 'pool5', blob_rois='rois', method=cfg.FAST_RCNN.ROI_XFORM_METHOD, resolution=7, sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO, spatial_scale=spatial_scale )", "4096) model.Relu('fc6', 'fc6') model.FC('fc6', 'fc7', 4096, 4096) blob_out = model.Relu('fc7',", "# Copyright (c) 2017-present, Facebook, Inc. # # Licensed under", "model.Relu('conv4_1', 'conv4_1') model.Conv('conv4_1', 'conv4_2', 512, 512, 3, pad=1, stride=1) model.Relu('conv4_2',", "model.FC('pool5', 'fc6', dim_in * 7 * 7, 4096) model.Relu('fc6', 'fc6')", "= model.Relu('conv5_3', 'conv5_3') return blob_out, 512, 1. / 16. 
def", "512, 512, 3, pad=1, stride=1) model.Relu('conv4_3', 'conv4_3') model.MaxPool('conv4_3', 'pool4', kernel=2,", "the specific language governing permissions and # limitations under the", "from __future__ import print_function from __future__ import unicode_literals from core.config", "3, pad=1, stride=1) model.Relu('conv3_3', 'conv3_3') model.MaxPool('conv3_3', 'pool3', kernel=2, pad=0, stride=2)", "pad=1, stride=1) blob_out = model.Relu('conv5_3', 'conv5_3') return blob_out, 512, 1.", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "1. / 16. def add_VGG16_roi_fc_head(model, blob_in, dim_in, spatial_scale): model.RoIFeatureTransform( blob_in,", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "from __future__ import unicode_literals from core.config import cfg def add_VGG16_conv5_body(model):", "'conv1_1') model.Conv('conv1_1', 'conv1_2', 64, 64, 3, pad=1, stride=1) model.Relu('conv1_2', 'conv1_2')", "except in compliance with the License. # You may obtain", "core.config import cfg def add_VGG16_conv5_body(model): model.Conv('data', 'conv1_1', 3, 64, 3,", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "Copyright (c) 2017-present, Facebook, Inc. # # Licensed under the", "3, pad=1, stride=1) model.Relu('conv1_1', 'conv1_1') model.Conv('conv1_1', 'conv1_2', 64, 64, 3,", "model.MaxPool('conv2_2', 'pool2', kernel=2, pad=0, stride=2) model.StopGradient('pool2', 'pool2') model.Conv('pool2', 'conv3_1', 128,", "'conv3_3', 256, 256, 3, pad=1, stride=1) model.Relu('conv3_3', 'conv3_3') model.MaxPool('conv3_3', 'pool3',", "blob_in, dim_in, spatial_scale): model.RoIFeatureTransform( blob_in, 'pool5', blob_rois='rois', method=cfg.FAST_RCNN.ROI_XFORM_METHOD, resolution=7, sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO,", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "def add_VGG16_conv5_body(model): model.Conv('data', 'conv1_1', 3, 64, 3, pad=1, stride=1) model.Relu('conv1_1',", "pad=1, stride=1) model.Relu('conv5_2', 'conv5_2') model.Conv('conv5_2', 'conv5_3', 512, 512, 3, pad=1,", "'conv5_3') return blob_out, 512, 1. / 16. def add_VGG16_roi_fc_head(model, blob_in,", "not use this file except in compliance with the License.", "'fc7', 4096, 4096) blob_out = model.Relu('fc7', 'fc7') return blob_out, 4096", "pad=1, stride=1) model.Relu('conv5_1', 'conv5_1') model.Conv('conv5_1', 'conv5_2', 512, 512, 3, pad=1,", "'conv1_1', 3, 64, 3, pad=1, stride=1) model.Relu('conv1_1', 'conv1_1') model.Conv('conv1_1', 'conv1_2',", "'conv2_1', 64, 128, 3, pad=1, stride=1) model.Relu('conv2_1', 'conv2_1') model.Conv('conv2_1', 'conv2_2',", "return blob_out, 512, 1. / 16. 
def add_VGG16_roi_fc_head(model, blob_in, dim_in,", "import unicode_literals from core.config import cfg def add_VGG16_conv5_body(model): model.Conv('data', 'conv1_1',", "writing, software # distributed under the License is distributed on", "stride=1) model.Relu('conv2_2', 'conv2_2') model.MaxPool('conv2_2', 'pool2', kernel=2, pad=0, stride=2) model.StopGradient('pool2', 'pool2')", "model.Conv('conv4_1', 'conv4_2', 512, 512, 3, pad=1, stride=1) model.Relu('conv4_2', 'conv4_2') model.Conv('conv4_2',", "in writing, software # distributed under the License is distributed", "import cfg def add_VGG16_conv5_body(model): model.Conv('data', 'conv1_1', 3, 64, 3, pad=1,", "model.Conv('pool3', 'conv4_1', 256, 512, 3, pad=1, stride=1) model.Relu('conv4_1', 'conv4_1') model.Conv('conv4_1',", "3, pad=1, stride=1) model.Relu('conv4_3', 'conv4_3') model.MaxPool('conv4_3', 'pool4', kernel=2, pad=0, stride=2)", "model.Conv('pool1', 'conv2_1', 64, 128, 3, pad=1, stride=1) model.Relu('conv2_1', 'conv2_1') model.Conv('conv2_1',", "you may not use this file except in compliance with", "'pool1', kernel=2, pad=0, stride=2) model.Conv('pool1', 'conv2_1', 64, 128, 3, pad=1,", "blob_out = model.Relu('conv5_3', 'conv5_3') return blob_out, 512, 1. / 16.", "'conv4_1') model.Conv('conv4_1', 'conv4_2', 512, 512, 3, pad=1, stride=1) model.Relu('conv4_2', 'conv4_2')", "3, pad=1, stride=1) model.Relu('conv4_2', 'conv4_2') model.Conv('conv4_2', 'conv4_3', 512, 512, 3,", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "'conv4_1', 256, 512, 3, pad=1, stride=1) model.Relu('conv4_1', 'conv4_1') model.Conv('conv4_1', 'conv4_2',", "Facebook, Inc. # # Licensed under the Apache License, Version", "256, 512, 3, pad=1, stride=1) model.Relu('conv4_1', 'conv4_1') model.Conv('conv4_1', 'conv4_2', 512,", "3, pad=1, stride=1) model.Relu('conv2_2', 'conv2_2') model.MaxPool('conv2_2', 'pool2', kernel=2, pad=0, stride=2)", "pad=0, stride=2) model.Conv('pool4', 'conv5_1', 512, 512, 3, pad=1, stride=1) model.Relu('conv5_1',", "512, 3, pad=1, stride=1) model.Relu('conv4_2', 'conv4_2') model.Conv('conv4_2', 'conv4_3', 512, 512,", "64, 64, 3, pad=1, stride=1) model.Relu('conv1_2', 'conv1_2') model.MaxPool('conv1_2', 'pool1', kernel=2,", "spatial_scale=spatial_scale ) model.FC('pool5', 'fc6', dim_in * 7 * 7, 4096)", "use this file except in compliance with the License. #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "'conv2_1') model.Conv('conv2_1', 'conv2_2', 128, 128, 3, pad=1, stride=1) model.Relu('conv2_2', 'conv2_2')", "3, pad=1, stride=1) model.Relu('conv4_1', 'conv4_1') model.Conv('conv4_1', 'conv4_2', 512, 512, 3,", "64, 3, pad=1, stride=1) model.Relu('conv1_1', 'conv1_1') model.Conv('conv1_1', 'conv1_2', 64, 64,", "64, 3, pad=1, stride=1) model.Relu('conv1_2', 'conv1_2') model.MaxPool('conv1_2', 'pool1', kernel=2, pad=0,", "kernel=2, pad=0, stride=2) model.Conv('pool1', 'conv2_1', 64, 128, 3, pad=1, stride=1)", "stride=2) model.StopGradient('pool2', 'pool2') model.Conv('pool2', 'conv3_1', 128, 256, 3, pad=1, stride=1)", "model.Conv('conv4_2', 'conv4_3', 512, 512, 3, pad=1, stride=1) model.Relu('conv4_3', 'conv4_3') model.MaxPool('conv4_3',", "model.Conv('conv5_2', 'conv5_3', 512, 512, 3, pad=1, stride=1) blob_out = model.Relu('conv5_3',", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "model.Relu('conv1_2', 'conv1_2') model.MaxPool('conv1_2', 'pool1', kernel=2, pad=0, stride=2) model.Conv('pool1', 'conv2_1', 64,", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "https://arxiv.org/abs/1409.1556.\"\"\" from __future__ import absolute_import from __future__ import division from", "model.Relu('conv2_1', 'conv2_1') model.Conv('conv2_1', 'conv2_2', 128, 128, 3, pad=1, stride=1) model.Relu('conv2_2',", "128, 3, pad=1, stride=1) model.Relu('conv2_2', 'conv2_2') model.MaxPool('conv2_2', 'pool2', kernel=2, pad=0,", "stride=1) model.Relu('conv3_1', 'conv3_1') model.Conv('conv3_1', 'conv3_2', 256, 256, 3, pad=1, stride=1)", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "'conv3_3') model.MaxPool('conv3_3', 'pool3', kernel=2, pad=0, stride=2) model.Conv('pool3', 'conv4_1', 256, 512,", "__future__ import division from __future__ import print_function from __future__ import", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "3, pad=1, stride=1) model.Relu('conv2_1', 'conv2_1') model.Conv('conv2_1', 'conv2_2', 128, 128, 3,", "# You may obtain a copy of the License at", "'conv3_1', 128, 256, 3, pad=1, stride=1) model.Relu('conv3_1', 'conv3_1') model.Conv('conv3_1', 'conv3_2',", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "'conv4_3', 512, 512, 3, pad=1, stride=1) model.Relu('conv4_3', 'conv4_3') model.MaxPool('conv4_3', 'pool4',", "and # limitations under the License. ############################################################################## \"\"\"VGG16 from https://arxiv.org/abs/1409.1556.\"\"\"", "stride=2) model.Conv('pool3', 'conv4_1', 256, 512, 3, pad=1, stride=1) model.Relu('conv4_1', 'conv4_1')", "stride=2) model.Conv('pool4', 'conv5_1', 512, 512, 3, pad=1, stride=1) model.Relu('conv5_1', 'conv5_1')", "512, 512, 3, pad=1, stride=1) model.Relu('conv5_1', 'conv5_1') model.Conv('conv5_1', 'conv5_2', 512,", "under the License is distributed on an \"AS IS\" BASIS,", "pad=1, stride=1) model.Relu('conv1_2', 'conv1_2') model.MaxPool('conv1_2', 'pool1', kernel=2, pad=0, stride=2) model.Conv('pool1',", "'fc6') model.FC('fc6', 'fc7', 4096, 4096) blob_out = model.Relu('fc7', 'fc7') return", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License for the specific language governing permissions and # limitations", "512, 3, pad=1, stride=1) model.Relu('conv4_1', 'conv4_1') model.Conv('conv4_1', 'conv4_2', 512, 512,", "sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO, spatial_scale=spatial_scale ) model.FC('pool5', 'fc6', dim_in * 7 * 7,", "model.Conv('conv3_1', 'conv3_2', 256, 256, 3, pad=1, stride=1) model.Relu('conv3_2', 'conv3_2') model.Conv('conv3_2',", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "limitations under the License. 
############################################################################## \"\"\"VGG16 from https://arxiv.org/abs/1409.1556.\"\"\" from __future__", "'pool2', kernel=2, pad=0, stride=2) model.StopGradient('pool2', 'pool2') model.Conv('pool2', 'conv3_1', 128, 256,", "'conv3_1') model.Conv('conv3_1', 'conv3_2', 256, 256, 3, pad=1, stride=1) model.Relu('conv3_2', 'conv3_2')", "model.Relu('conv3_3', 'conv3_3') model.MaxPool('conv3_3', 'pool3', kernel=2, pad=0, stride=2) model.Conv('pool3', 'conv4_1', 256,", "############################################################################## \"\"\"VGG16 from https://arxiv.org/abs/1409.1556.\"\"\" from __future__ import absolute_import from __future__", "512, 512, 3, pad=1, stride=1) blob_out = model.Relu('conv5_3', 'conv5_3') return", "permissions and # limitations under the License. ############################################################################## \"\"\"VGG16 from", "dim_in, spatial_scale): model.RoIFeatureTransform( blob_in, 'pool5', blob_rois='rois', method=cfg.FAST_RCNN.ROI_XFORM_METHOD, resolution=7, sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO, spatial_scale=spatial_scale", "the License for the specific language governing permissions and #", "model.Conv('data', 'conv1_1', 3, 64, 3, pad=1, stride=1) model.Relu('conv1_1', 'conv1_1') model.Conv('conv1_1',", "blob_rois='rois', method=cfg.FAST_RCNN.ROI_XFORM_METHOD, resolution=7, sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO, spatial_scale=spatial_scale ) model.FC('pool5', 'fc6', dim_in *", "model.FC('fc6', 'fc7', 4096, 4096) blob_out = model.Relu('fc7', 'fc7') return blob_out,", "'pool3', kernel=2, pad=0, stride=2) model.Conv('pool3', 'conv4_1', 256, 512, 3, pad=1,", "(the \"License\"); # you may not use this file except", "'conv2_2') model.MaxPool('conv2_2', 'pool2', kernel=2, pad=0, stride=2) model.StopGradient('pool2', 'pool2') model.Conv('pool2', 'conv3_1',", "model.Conv('conv3_2', 'conv3_3', 256, 256, 3, pad=1, stride=1) model.Relu('conv3_3', 'conv3_3') model.MaxPool('conv3_3',", "Apache License, Version 2.0 (the \"License\"); # you may not", "'conv5_2') model.Conv('conv5_2', 'conv5_3', 512, 512, 3, pad=1, stride=1) blob_out =", "# you may not use this file except in compliance", "either express or implied. # See the License for the", "OR CONDITIONS OF ANY KIND, either express or implied. #", "stride=1) model.Relu('conv1_1', 'conv1_1') model.Conv('conv1_1', 'conv1_2', 64, 64, 3, pad=1, stride=1)", "from https://arxiv.org/abs/1409.1556.\"\"\" from __future__ import absolute_import from __future__ import division", "16. def add_VGG16_roi_fc_head(model, blob_in, dim_in, spatial_scale): model.RoIFeatureTransform( blob_in, 'pool5', blob_rois='rois',", "2017-present, Facebook, Inc. # # Licensed under the Apache License,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "print_function from __future__ import unicode_literals from core.config import cfg def", "* 7 * 7, 4096) model.Relu('fc6', 'fc6') model.FC('fc6', 'fc7', 4096,", "the License is distributed on an \"AS IS\" BASIS, #", "from __future__ import absolute_import from __future__ import division from __future__", "stride=1) model.Relu('conv5_2', 'conv5_2') model.Conv('conv5_2', 'conv5_3', 512, 512, 3, pad=1, stride=1)", "* 7, 4096) model.Relu('fc6', 'fc6') model.FC('fc6', 'fc7', 4096, 4096) blob_out", "in compliance with the License. 
# You may obtain a", "def add_VGG16_roi_fc_head(model, blob_in, dim_in, spatial_scale): model.RoIFeatureTransform( blob_in, 'pool5', blob_rois='rois', method=cfg.FAST_RCNN.ROI_XFORM_METHOD,", "512, 3, pad=1, stride=1) blob_out = model.Relu('conv5_3', 'conv5_3') return blob_out,", "under the License. ############################################################################## \"\"\"VGG16 from https://arxiv.org/abs/1409.1556.\"\"\" from __future__ import", "method=cfg.FAST_RCNN.ROI_XFORM_METHOD, resolution=7, sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO, spatial_scale=spatial_scale ) model.FC('pool5', 'fc6', dim_in * 7", "model.RoIFeatureTransform( blob_in, 'pool5', blob_rois='rois', method=cfg.FAST_RCNN.ROI_XFORM_METHOD, resolution=7, sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO, spatial_scale=spatial_scale ) model.FC('pool5',", "'conv1_2', 64, 64, 3, pad=1, stride=1) model.Relu('conv1_2', 'conv1_2') model.MaxPool('conv1_2', 'pool1',", "software # distributed under the License is distributed on an", "License. ############################################################################## \"\"\"VGG16 from https://arxiv.org/abs/1409.1556.\"\"\" from __future__ import absolute_import from", "model.MaxPool('conv1_2', 'pool1', kernel=2, pad=0, stride=2) model.Conv('pool1', 'conv2_1', 64, 128, 3,", "'conv3_2', 256, 256, 3, pad=1, stride=1) model.Relu('conv3_2', 'conv3_2') model.Conv('conv3_2', 'conv3_3',", "model.Relu('conv2_2', 'conv2_2') model.MaxPool('conv2_2', 'pool2', kernel=2, pad=0, stride=2) model.StopGradient('pool2', 'pool2') model.Conv('pool2',", "pad=0, stride=2) model.Conv('pool3', 'conv4_1', 256, 512, 3, pad=1, stride=1) model.Relu('conv4_1',", "3, 64, 3, pad=1, stride=1) model.Relu('conv1_1', 'conv1_1') model.Conv('conv1_1', 'conv1_2', 64,", "pad=1, stride=1) model.Relu('conv2_1', 'conv2_1') model.Conv('conv2_1', 'conv2_2', 128, 128, 3, pad=1,", "3, pad=1, stride=1) model.Relu('conv3_1', 'conv3_1') model.Conv('conv3_1', 'conv3_2', 256, 256, 3,", "__future__ import print_function from __future__ import unicode_literals from core.config import", "stride=1) model.Relu('conv3_3', 'conv3_3') model.MaxPool('conv3_3', 'pool3', kernel=2, pad=0, stride=2) model.Conv('pool3', 'conv4_1',", "# # Unless required by applicable law or agreed to", "model.Relu('conv5_3', 'conv5_3') return blob_out, 512, 1. / 16. def add_VGG16_roi_fc_head(model,", "stride=2) model.Conv('pool1', 'conv2_1', 64, 128, 3, pad=1, stride=1) model.Relu('conv2_1', 'conv2_1')", "model.Relu('conv1_1', 'conv1_1') model.Conv('conv1_1', 'conv1_2', 64, 64, 3, pad=1, stride=1) model.Relu('conv1_2',", "64, 128, 3, pad=1, stride=1) model.Relu('conv2_1', 'conv2_1') model.Conv('conv2_1', 'conv2_2', 128,", "pad=1, stride=1) model.Relu('conv2_2', 'conv2_2') model.MaxPool('conv2_2', 'pool2', kernel=2, pad=0, stride=2) model.StopGradient('pool2',", "'conv1_2') model.MaxPool('conv1_2', 'pool1', kernel=2, pad=0, stride=2) model.Conv('pool1', 'conv2_1', 64, 128,", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "kernel=2, pad=0, stride=2) model.Conv('pool4', 'conv5_1', 512, 512, 3, pad=1, stride=1)", "unicode_literals from core.config import cfg def add_VGG16_conv5_body(model): model.Conv('data', 'conv1_1', 3,", "pad=1, stride=1) model.Relu('conv3_1', 'conv3_1') model.Conv('conv3_1', 'conv3_2', 256, 256, 3, pad=1,", "(c) 2017-present, Facebook, Inc. 
# # Licensed under the Apache", "pad=1, stride=1) model.Relu('conv1_1', 'conv1_1') model.Conv('conv1_1', 'conv1_2', 64, 64, 3, pad=1,", "Version 2.0 (the \"License\"); # you may not use this", "law or agreed to in writing, software # distributed under", "governing permissions and # limitations under the License. ############################################################################## \"\"\"VGG16", "7, 4096) model.Relu('fc6', 'fc6') model.FC('fc6', 'fc7', 4096, 4096) blob_out =", "512, 3, pad=1, stride=1) model.Relu('conv5_1', 'conv5_1') model.Conv('conv5_1', 'conv5_2', 512, 512,", "512, 512, 3, pad=1, stride=1) model.Relu('conv5_2', 'conv5_2') model.Conv('conv5_2', 'conv5_3', 512,", "pad=0, stride=2) model.StopGradient('pool2', 'pool2') model.Conv('pool2', 'conv3_1', 128, 256, 3, pad=1,", "stride=1) model.Relu('conv3_2', 'conv3_2') model.Conv('conv3_2', 'conv3_3', 256, 256, 3, pad=1, stride=1)", "import division from __future__ import print_function from __future__ import unicode_literals", "blob_out, 512, 1. / 16. def add_VGG16_roi_fc_head(model, blob_in, dim_in, spatial_scale):", "model.Conv('pool4', 'conv5_1', 512, 512, 3, pad=1, stride=1) model.Relu('conv5_1', 'conv5_1') model.Conv('conv5_1',", "__future__ import absolute_import from __future__ import division from __future__ import", "'conv5_1', 512, 512, 3, pad=1, stride=1) model.Relu('conv5_1', 'conv5_1') model.Conv('conv5_1', 'conv5_2',", "256, 256, 3, pad=1, stride=1) model.Relu('conv3_2', 'conv3_2') model.Conv('conv3_2', 'conv3_3', 256,", "3, pad=1, stride=1) model.Relu('conv5_1', 'conv5_1') model.Conv('conv5_1', 'conv5_2', 512, 512, 3,", "implied. # See the License for the specific language governing", "256, 3, pad=1, stride=1) model.Relu('conv3_3', 'conv3_3') model.MaxPool('conv3_3', 'pool3', kernel=2, pad=0,", "model.Relu('conv5_2', 'conv5_2') model.Conv('conv5_2', 'conv5_3', 512, 512, 3, pad=1, stride=1) blob_out", "division from __future__ import print_function from __future__ import unicode_literals from", "under the Apache License, Version 2.0 (the \"License\"); # you", "'conv4_2') model.Conv('conv4_2', 'conv4_3', 512, 512, 3, pad=1, stride=1) model.Relu('conv4_3', 'conv4_3')", "256, 3, pad=1, stride=1) model.Relu('conv3_1', 'conv3_1') model.Conv('conv3_1', 'conv3_2', 256, 256,", "\"License\"); # you may not use this file except in", "pad=1, stride=1) model.Relu('conv3_3', 'conv3_3') model.MaxPool('conv3_3', 'pool3', kernel=2, pad=0, stride=2) model.Conv('pool3',", "128, 128, 3, pad=1, stride=1) model.Relu('conv2_2', 'conv2_2') model.MaxPool('conv2_2', 'pool2', kernel=2,", "import print_function from __future__ import unicode_literals from core.config import cfg", "/ 16. def add_VGG16_roi_fc_head(model, blob_in, dim_in, spatial_scale): model.RoIFeatureTransform( blob_in, 'pool5',", "3, pad=1, stride=1) blob_out = model.Relu('conv5_3', 'conv5_3') return blob_out, 512,", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "the License. 
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################

"""VGG16 from https://arxiv.org/abs/1409.1556."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from core.config import cfg


def add_VGG16_conv5_body(model):
    model.Conv('data', 'conv1_1', 3, 64, 3, pad=1, stride=1)
    model.Relu('conv1_1', 'conv1_1')
    model.Conv('conv1_1', 'conv1_2', 64, 64, 3, pad=1, stride=1)
    model.Relu('conv1_2', 'conv1_2')
    model.MaxPool('conv1_2', 'pool1', kernel=2, pad=0, stride=2)
    model.Conv('pool1', 'conv2_1', 64, 128, 3, pad=1, stride=1)
    model.Relu('conv2_1', 'conv2_1')
    model.Conv('conv2_1', 'conv2_2', 128, 128, 3, pad=1, stride=1)
    model.Relu('conv2_2', 'conv2_2')
    model.MaxPool('conv2_2', 'pool2', kernel=2, pad=0, stride=2)
    model.StopGradient('pool2', 'pool2')
    model.Conv('pool2', 'conv3_1', 128, 256, 3, pad=1, stride=1)
    model.Relu('conv3_1', 'conv3_1')
    model.Conv('conv3_1', 'conv3_2', 256, 256, 3, pad=1, stride=1)
    model.Relu('conv3_2', 'conv3_2')
    model.Conv('conv3_2', 'conv3_3', 256, 256, 3, pad=1, stride=1)
    model.Relu('conv3_3', 'conv3_3')
    model.MaxPool('conv3_3', 'pool3', kernel=2, pad=0, stride=2)
    model.Conv('pool3', 'conv4_1', 256, 512, 3, pad=1, stride=1)
    model.Relu('conv4_1', 'conv4_1')
    model.Conv('conv4_1', 'conv4_2', 512, 512, 3, pad=1, stride=1)
    model.Relu('conv4_2', 'conv4_2')
    model.Conv('conv4_2', 'conv4_3', 512, 512, 3, pad=1, stride=1)
    model.Relu('conv4_3', 'conv4_3')
    model.MaxPool('conv4_3', 'pool4', kernel=2, pad=0, stride=2)
    model.Conv('pool4', 'conv5_1', 512, 512, 3, pad=1, stride=1)
    model.Relu('conv5_1', 'conv5_1')
    model.Conv('conv5_1', 'conv5_2', 512, 512, 3, pad=1, stride=1)
    model.Relu('conv5_2', 'conv5_2')
    model.Conv('conv5_2', 'conv5_3', 512, 512, 3, pad=1, stride=1)
    blob_out = model.Relu('conv5_3', 'conv5_3')
    return blob_out, 512, 1. / 16.


def add_VGG16_roi_fc_head(model, blob_in, dim_in, spatial_scale):
    model.RoIFeatureTransform(
        blob_in,
        'pool5',
        blob_rois='rois',
        method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
        resolution=7,
        sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO,
        spatial_scale=spatial_scale
    )
    model.FC('pool5', 'fc6', dim_in * 7 * 7, 4096)
    model.Relu('fc6', 'fc6')
    model.FC('fc6', 'fc7', 4096, 4096)
    blob_out = model.Relu('fc7', 'fc7')
    return blob_out, 4096
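# --- Usage sketch (not part of Detectron): the builders above touch only a
# handful of model methods, so the wiring can be smoke-tested without Caffe2.
# `_RecordingModel` is a hypothetical stub that just records each op and
# passes the output blob name through, the same way the real wrapper does.
class _RecordingModel(object):
    def __init__(self):
        self.ops = []

    def _record(self, op, blob_in, blob_out):
        self.ops.append((op, blob_in, blob_out))
        return blob_out

    def Conv(self, blob_in, blob_out, dim_in, dim_out, kernel, **kwargs):
        return self._record('Conv', blob_in, blob_out)

    def Relu(self, blob_in, blob_out):
        return self._record('Relu', blob_in, blob_out)

    def MaxPool(self, blob_in, blob_out, **kwargs):
        return self._record('MaxPool', blob_in, blob_out)

    def StopGradient(self, blob_in, blob_out):
        return self._record('StopGradient', blob_in, blob_out)


blob, dim, scale = add_VGG16_conv5_body(_RecordingModel())
print(blob, dim, scale)  # -> conv5_3 512 0.0625 (a stride-16 feature map)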
[ "<4', # custom classifiers=[ \"Programming Language :: Python :: 3\",", "os import sys from shutil import rmtree from setuptools import", "as f: long_description = f.read() install_requires = [ # custom", "'build')) rmtree(os.path.join(here, 'xmind2testcase.egg-info')) # custom except OSError: pass self.status('Congratulations! Upload", "publish git tag successfully...') sys.exit() setup( name=about['__title__'], version=about['__version__'], description=about['__description__'], long_description=long_description,", "3\", \"License :: OSI Approved :: MIT License\", \"Operating System", "as f: # custom exec(f.read(), about) with io.open('README.md', encoding='utf-8') as", "[ 'xmind2testcase=xmind2testcase.cli:cli_main', ] }, cmdclass={ # python3 setup.py pypi 'pypi':", "(universal) distribution...') os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable)) self.status('Uploading the package", "tag successfully...') sys.exit() setup( name=about['__title__'], version=about['__version__'], description=about['__description__'], long_description=long_description, long_description_content_type='text/markdown', keywords=about['__keywords__'],", "'': ['README.md'], 'webtool': ['static/*', 'static/css/*', 'static/guide/*', 'templates/*', 'schema.sql'], }, install_requires=install_requires,", "}, install_requires=install_requires, extras_require={}, python_requires='>=3.0, <4', # custom classifiers=[ \"Programming Language", "os.path.abspath(os.path.dirname(__file__)) with io.open(os.path.join(here, 'xmind2testcase', '__about__.py'), encoding='utf-8') as f: # custom", "System :: OS Independent\", ], entry_points={ # custom 'console_scripts': [", "import io import os import sys from shutil import rmtree", "install_requires = [ # custom \"xmind\", \"flask\", \"arrow\", ] class", "shutil import rmtree from setuptools import setup, find_packages, Command about", "things in green color.\"\"\" print('\\033[0;32m{0}\\033[0m'.format(s)) def initialize_options(self): \"\"\" override \"\"\"", "current build artifacts...') rmtree(os.path.join(here, 'dist')) rmtree(os.path.join(here, 'build')) rmtree(os.path.join(here, 'xmind2testcase.egg-info')) #", "with io.open(os.path.join(here, 'xmind2testcase', '__about__.py'), encoding='utf-8') as f: # custom exec(f.read(),", "'static/guide/*', 'templates/*', 'schema.sql'], }, install_requires=install_requires, extras_require={}, python_requires='>=3.0, <4', # custom", "encoding='utf-8') as f: # custom exec(f.read(), about) with io.open('README.md', encoding='utf-8')", "color.\"\"\" print('\\033[0;32m{0}\\033[0m'.format(s)) def initialize_options(self): \"\"\" override \"\"\" pass def finalize_options(self):", "--tags') try: self.status('Removing current build artifacts...') rmtree(os.path.join(here, 'dist')) rmtree(os.path.join(here, 'build'))", "# custom 'console_scripts': [ 'xmind2testcase=xmind2testcase.cli:cli_main', ] }, cmdclass={ # python3", "def run(self): self.status('Building Source and Wheel (universal) distribution...') os.system('{0} setup.py", "python setup.py pypi Copied from requests_html \"\"\" user_options = []", "build artifacts...') rmtree(os.path.join(here, 'dist')) rmtree(os.path.join(here, 'build')) rmtree(os.path.join(here, 'xmind2testcase.egg-info')) # custom", "pass def finalize_options(self): \"\"\" override \"\"\" pass def run(self): self.status('Building", "run(self): self.status('Building Source and Wheel (universal) distribution...') os.system('{0} setup.py sdist", "os.system('git push --tags') try: 
self.status('Removing current build artifacts...') rmtree(os.path.join(here, 'dist'))", "self.status('Congratulations! Upload PyPi and publish git tag successfully...') sys.exit() setup(", "rmtree from setuptools import setup, find_packages, Command about = {}", "'schema.sql'], }, install_requires=install_requires, extras_require={}, python_requires='>=3.0, <4', # custom classifiers=[ \"Programming", "\"License :: OSI Approved :: MIT License\", \"Operating System ::", "os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable)) self.status('Uploading the package to PyPi", "find_packages, Command about = {} here = os.path.abspath(os.path.dirname(__file__)) with io.open(os.path.join(here,", "requests_html \"\"\" user_options = [] @staticmethod def status(s): \"\"\"Prints things", "Source and Wheel (universal) distribution...') os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))", "'xmind2testcase.egg-info')) # custom except OSError: pass self.status('Congratulations! Upload PyPi and", "Command about = {} here = os.path.abspath(os.path.dirname(__file__)) with io.open(os.path.join(here, 'xmind2testcase',", "] class PyPiCommand(Command): \"\"\" Build and publish this package and", "\"Operating System :: OS Independent\", ], entry_points={ # custom 'console_scripts':", "-*- coding: utf-8 -*- import io import os import sys", "['static/*', 'static/css/*', 'static/guide/*', 'templates/*', 'schema.sql'], }, install_requires=install_requires, extras_require={}, python_requires='>=3.0, <4',", "os.system('twine upload dist/*') self.status('Publishing git tags...') os.system('git tag v{0}'.format(about['__version__'])) os.system('git", "tag. Support: python setup.py pypi Copied from requests_html \"\"\" user_options", "\"\"\" pass def run(self): self.status('Building Source and Wheel (universal) distribution...')", "os.system('git tag v{0}'.format(about['__version__'])) os.system('git push --tags') try: self.status('Removing current build", "io.open('README.md', encoding='utf-8') as f: long_description = f.read() install_requires = [", "dist/*') self.status('Publishing git tags...') os.system('git tag v{0}'.format(about['__version__'])) os.system('git push --tags')", "# custom exec(f.read(), about) with io.open('README.md', encoding='utf-8') as f: long_description", "Build and publish this package and make a tag. Support:", "Support: python setup.py pypi Copied from requests_html \"\"\" user_options =", "pypi Copied from requests_html \"\"\" user_options = [] @staticmethod def", "[] @staticmethod def status(s): \"\"\"Prints things in green color.\"\"\" print('\\033[0;32m{0}\\033[0m'.format(s))", ":: 3\", \"License :: OSI Approved :: MIT License\", \"Operating", "package and make a tag. Support: python setup.py pypi Copied", "\"flask\", \"arrow\", ] class PyPiCommand(Command): \"\"\" Build and publish this", "= [ # custom \"xmind\", \"flask\", \"arrow\", ] class PyPiCommand(Command):", "f.read() install_requires = [ # custom \"xmind\", \"flask\", \"arrow\", ]", "= [] @staticmethod def status(s): \"\"\"Prints things in green color.\"\"\"", "in green color.\"\"\" print('\\033[0;32m{0}\\033[0m'.format(s)) def initialize_options(self): \"\"\" override \"\"\" pass", "distribution...') os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable)) self.status('Uploading the package to", "# custom except OSError: pass self.status('Congratulations! 
Upload PyPi and publish", "setup.py sdist bdist_wheel --universal'.format(sys.executable)) self.status('Uploading the package to PyPi via", "def finalize_options(self): \"\"\" override \"\"\" pass def run(self): self.status('Building Source", "'xmind2testcase', '__about__.py'), encoding='utf-8') as f: # custom exec(f.read(), about) with", "# custom package_data={ # custom '': ['README.md'], 'webtool': ['static/*', 'static/css/*',", "[ # custom \"xmind\", \"flask\", \"arrow\", ] class PyPiCommand(Command): \"\"\"", "self.status('Publishing git tags...') os.system('git tag v{0}'.format(about['__version__'])) os.system('git push --tags') try:", "--universal'.format(sys.executable)) self.status('Uploading the package to PyPi via Twine...') os.system('twine upload", "this package and make a tag. Support: python setup.py pypi", "= f.read() install_requires = [ # custom \"xmind\", \"flask\", \"arrow\",", "rmtree(os.path.join(here, 'dist')) rmtree(os.path.join(here, 'build')) rmtree(os.path.join(here, 'xmind2testcase.egg-info')) # custom except OSError:", "PyPi and publish git tag successfully...') sys.exit() setup( name=about['__title__'], version=about['__version__'],", "upload dist/*') self.status('Publishing git tags...') os.system('git tag v{0}'.format(about['__version__'])) os.system('git push", ":: OS Independent\", ], entry_points={ # custom 'console_scripts': [ 'xmind2testcase=xmind2testcase.cli:cli_main',", "python # -*- coding: utf-8 -*- import io import os", "f: long_description = f.read() install_requires = [ # custom \"xmind\",", "sdist bdist_wheel --universal'.format(sys.executable)) self.status('Uploading the package to PyPi via Twine...')", "green color.\"\"\" print('\\033[0;32m{0}\\033[0m'.format(s)) def initialize_options(self): \"\"\" override \"\"\" pass def", "setup.py pypi Copied from requests_html \"\"\" user_options = [] @staticmethod", "Python :: 3\", \"License :: OSI Approved :: MIT License\",", "custom 'console_scripts': [ 'xmind2testcase=xmind2testcase.cli:cli_main', ] }, cmdclass={ # python3 setup.py", "['README.md'], 'webtool': ['static/*', 'static/css/*', 'static/guide/*', 'templates/*', 'schema.sql'], }, install_requires=install_requires, extras_require={},", "keywords=about['__keywords__'], author=about['__author__'], author_email=about['__author_email__'], url=about['__url__'], license=about['__license__'], packages=find_packages(exclude=['tests', 'test.*', 'docs']), # custom", "entry_points={ # custom 'console_scripts': [ 'xmind2testcase=xmind2testcase.cli:cli_main', ] }, cmdclass={ #", "\"arrow\", ] class PyPiCommand(Command): \"\"\" Build and publish this package", "extras_require={}, python_requires='>=3.0, <4', # custom classifiers=[ \"Programming Language :: Python", "coding: utf-8 -*- import io import os import sys from", "about = {} here = os.path.abspath(os.path.dirname(__file__)) with io.open(os.path.join(here, 'xmind2testcase', '__about__.py'),", "self.status('Uploading the package to PyPi via Twine...') os.system('twine upload dist/*')", "'dist')) rmtree(os.path.join(here, 'build')) rmtree(os.path.join(here, 'xmind2testcase.egg-info')) # custom except OSError: pass", "def initialize_options(self): \"\"\" override \"\"\" pass def finalize_options(self): \"\"\" override", "Wheel (universal) distribution...') os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable)) self.status('Uploading the", "here = os.path.abspath(os.path.dirname(__file__)) with io.open(os.path.join(here, 'xmind2testcase', '__about__.py'), encoding='utf-8') as 
f:", "}, cmdclass={ # python3 setup.py pypi 'pypi': PyPiCommand } )", "\"\"\" override \"\"\" pass def run(self): self.status('Building Source and Wheel", "import rmtree from setuptools import setup, find_packages, Command about =", "'__about__.py'), encoding='utf-8') as f: # custom exec(f.read(), about) with io.open('README.md',", "python_requires='>=3.0, <4', # custom classifiers=[ \"Programming Language :: Python ::", "package_data={ # custom '': ['README.md'], 'webtool': ['static/*', 'static/css/*', 'static/guide/*', 'templates/*',", "v{0}'.format(about['__version__'])) os.system('git push --tags') try: self.status('Removing current build artifacts...') rmtree(os.path.join(here,", "encoding='utf-8') as f: long_description = f.read() install_requires = [ #", "# custom '': ['README.md'], 'webtool': ['static/*', 'static/css/*', 'static/guide/*', 'templates/*', 'schema.sql'],", "successfully...') sys.exit() setup( name=about['__title__'], version=about['__version__'], description=about['__description__'], long_description=long_description, long_description_content_type='text/markdown', keywords=about['__keywords__'], author=about['__author__'],", "license=about['__license__'], packages=find_packages(exclude=['tests', 'test.*', 'docs']), # custom package_data={ # custom '':", "\"\"\"Prints things in green color.\"\"\" print('\\033[0;32m{0}\\033[0m'.format(s)) def initialize_options(self): \"\"\" override", "pass def run(self): self.status('Building Source and Wheel (universal) distribution...') os.system('{0}", "initialize_options(self): \"\"\" override \"\"\" pass def finalize_options(self): \"\"\" override \"\"\"", "self.status('Building Source and Wheel (universal) distribution...') os.system('{0} setup.py sdist bdist_wheel", "io.open(os.path.join(here, 'xmind2testcase', '__about__.py'), encoding='utf-8') as f: # custom exec(f.read(), about)", "'static/css/*', 'static/guide/*', 'templates/*', 'schema.sql'], }, install_requires=install_requires, extras_require={}, python_requires='>=3.0, <4', #", "setup, find_packages, Command about = {} here = os.path.abspath(os.path.dirname(__file__)) with", "except OSError: pass self.status('Congratulations! Upload PyPi and publish git tag", "custom except OSError: pass self.status('Congratulations! 
Upload PyPi and publish git", "= {} here = os.path.abspath(os.path.dirname(__file__)) with io.open(os.path.join(here, 'xmind2testcase', '__about__.py'), encoding='utf-8')", "sys.exit() setup( name=about['__title__'], version=about['__version__'], description=about['__description__'], long_description=long_description, long_description_content_type='text/markdown', keywords=about['__keywords__'], author=about['__author__'], author_email=about['__author_email__'],", "sys from shutil import rmtree from setuptools import setup, find_packages,", "the package to PyPi via Twine...') os.system('twine upload dist/*') self.status('Publishing", "version=about['__version__'], description=about['__description__'], long_description=long_description, long_description_content_type='text/markdown', keywords=about['__keywords__'], author=about['__author__'], author_email=about['__author_email__'], url=about['__url__'], license=about['__license__'], packages=find_packages(exclude=['tests',", "MIT License\", \"Operating System :: OS Independent\", ], entry_points={ #", "# -*- coding: utf-8 -*- import io import os import", "print('\\033[0;32m{0}\\033[0m'.format(s)) def initialize_options(self): \"\"\" override \"\"\" pass def finalize_options(self): \"\"\"", "class PyPiCommand(Command): \"\"\" Build and publish this package and make", "utf-8 -*- import io import os import sys from shutil", "from requests_html \"\"\" user_options = [] @staticmethod def status(s): \"\"\"Prints", "finalize_options(self): \"\"\" override \"\"\" pass def run(self): self.status('Building Source and", "'docs']), # custom package_data={ # custom '': ['README.md'], 'webtool': ['static/*',", "PyPiCommand(Command): \"\"\" Build and publish this package and make a", "self.status('Removing current build artifacts...') rmtree(os.path.join(here, 'dist')) rmtree(os.path.join(here, 'build')) rmtree(os.path.join(here, 'xmind2testcase.egg-info'))", "] }, cmdclass={ # python3 setup.py pypi 'pypi': PyPiCommand }", "from setuptools import setup, find_packages, Command about = {} here", "= os.path.abspath(os.path.dirname(__file__)) with io.open(os.path.join(here, 'xmind2testcase', '__about__.py'), encoding='utf-8') as f: #", "git tag successfully...') sys.exit() setup( name=about['__title__'], version=about['__version__'], description=about['__description__'], long_description=long_description, long_description_content_type='text/markdown',", "io import os import sys from shutil import rmtree from", "\"xmind\", \"flask\", \"arrow\", ] class PyPiCommand(Command): \"\"\" Build and publish", "name=about['__title__'], version=about['__version__'], description=about['__description__'], long_description=long_description, long_description_content_type='text/markdown', keywords=about['__keywords__'], author=about['__author__'], author_email=about['__author_email__'], url=about['__url__'], license=about['__license__'],", "\"\"\" Build and publish this package and make a tag.", "\"\"\" user_options = [] @staticmethod def status(s): \"\"\"Prints things in", "# custom classifiers=[ \"Programming Language :: Python :: 3\", \"License", "classifiers=[ \"Programming Language :: Python :: 3\", \"License :: OSI", "'webtool': ['static/*', 'static/css/*', 'static/guide/*', 'templates/*', 'schema.sql'], }, install_requires=install_requires, extras_require={}, python_requires='>=3.0,", "with io.open('README.md', encoding='utf-8') as f: long_description = f.read() install_requires =", "rmtree(os.path.join(here, 'build')) rmtree(os.path.join(here, 
'xmind2testcase.egg-info')) # custom except OSError: pass self.status('Congratulations!", "install_requires=install_requires, extras_require={}, python_requires='>=3.0, <4', # custom classifiers=[ \"Programming Language ::", "long_description=long_description, long_description_content_type='text/markdown', keywords=about['__keywords__'], author=about['__author__'], author_email=about['__author_email__'], url=about['__url__'], license=about['__license__'], packages=find_packages(exclude=['tests', 'test.*', 'docs']),", "License\", \"Operating System :: OS Independent\", ], entry_points={ # custom", "custom '': ['README.md'], 'webtool': ['static/*', 'static/css/*', 'static/guide/*', 'templates/*', 'schema.sql'], },", "url=about['__url__'], license=about['__license__'], packages=find_packages(exclude=['tests', 'test.*', 'docs']), # custom package_data={ # custom", "and make a tag. Support: python setup.py pypi Copied from", "f: # custom exec(f.read(), about) with io.open('README.md', encoding='utf-8') as f:", "override \"\"\" pass def run(self): self.status('Building Source and Wheel (universal)", "artifacts...') rmtree(os.path.join(here, 'dist')) rmtree(os.path.join(here, 'build')) rmtree(os.path.join(here, 'xmind2testcase.egg-info')) # custom except", "Independent\", ], entry_points={ # custom 'console_scripts': [ 'xmind2testcase=xmind2testcase.cli:cli_main', ] },", "import setup, find_packages, Command about = {} here = os.path.abspath(os.path.dirname(__file__))", "{} here = os.path.abspath(os.path.dirname(__file__)) with io.open(os.path.join(here, 'xmind2testcase', '__about__.py'), encoding='utf-8') as", "OS Independent\", ], entry_points={ # custom 'console_scripts': [ 'xmind2testcase=xmind2testcase.cli:cli_main', ]", "user_options = [] @staticmethod def status(s): \"\"\"Prints things in green", "\"\"\" override \"\"\" pass def finalize_options(self): \"\"\" override \"\"\" pass", "author_email=about['__author_email__'], url=about['__url__'], license=about['__license__'], packages=find_packages(exclude=['tests', 'test.*', 'docs']), # custom package_data={ #", "exec(f.read(), about) with io.open('README.md', encoding='utf-8') as f: long_description = f.read()", "override \"\"\" pass def finalize_options(self): \"\"\" override \"\"\" pass def", "and publish this package and make a tag. Support: python", "'test.*', 'docs']), # custom package_data={ # custom '': ['README.md'], 'webtool':", "packages=find_packages(exclude=['tests', 'test.*', 'docs']), # custom package_data={ # custom '': ['README.md'],", "custom classifiers=[ \"Programming Language :: Python :: 3\", \"License ::", ":: Python :: 3\", \"License :: OSI Approved :: MIT", "Copied from requests_html \"\"\" user_options = [] @staticmethod def status(s):", "bdist_wheel --universal'.format(sys.executable)) self.status('Uploading the package to PyPi via Twine...') os.system('twine", "Upload PyPi and publish git tag successfully...') sys.exit() setup( name=about['__title__'],", "\"\"\" pass def finalize_options(self): \"\"\" override \"\"\" pass def run(self):", "OSI Approved :: MIT License\", \"Operating System :: OS Independent\",", "about) with io.open('README.md', encoding='utf-8') as f: long_description = f.read() install_requires", "make a tag. 
Support: python setup.py pypi Copied from requests_html", "description=about['__description__'], long_description=long_description, long_description_content_type='text/markdown', keywords=about['__keywords__'], author=about['__author__'], author_email=about['__author_email__'], url=about['__url__'], license=about['__license__'], packages=find_packages(exclude=['tests', 'test.*',", "author=about['__author__'], author_email=about['__author_email__'], url=about['__url__'], license=about['__license__'], packages=find_packages(exclude=['tests', 'test.*', 'docs']), # custom package_data={", "], entry_points={ # custom 'console_scripts': [ 'xmind2testcase=xmind2testcase.cli:cli_main', ] }, cmdclass={", "and publish git tag successfully...') sys.exit() setup( name=about['__title__'], version=about['__version__'], description=about['__description__'],", "long_description_content_type='text/markdown', keywords=about['__keywords__'], author=about['__author__'], author_email=about['__author_email__'], url=about['__url__'], license=about['__license__'], packages=find_packages(exclude=['tests', 'test.*', 'docs']), #", "package to PyPi via Twine...') os.system('twine upload dist/*') self.status('Publishing git", "git tags...') os.system('git tag v{0}'.format(about['__version__'])) os.system('git push --tags') try: self.status('Removing", "long_description = f.read() install_requires = [ # custom \"xmind\", \"flask\",", "-*- import io import os import sys from shutil import", "\"Programming Language :: Python :: 3\", \"License :: OSI Approved", "'xmind2testcase=xmind2testcase.cli:cli_main', ] }, cmdclass={ # python3 setup.py pypi 'pypi': PyPiCommand", "Language :: Python :: 3\", \"License :: OSI Approved ::", "import sys from shutil import rmtree from setuptools import setup,", "custom exec(f.read(), about) with io.open('README.md', encoding='utf-8') as f: long_description =", "setup( name=about['__title__'], version=about['__version__'], description=about['__description__'], long_description=long_description, long_description_content_type='text/markdown', keywords=about['__keywords__'], author=about['__author__'], author_email=about['__author_email__'], url=about['__url__'],", "setuptools import setup, find_packages, Command about = {} here =", ":: MIT License\", \"Operating System :: OS Independent\", ], entry_points={", "publish this package and make a tag. Support: python setup.py", "tag v{0}'.format(about['__version__'])) os.system('git push --tags') try: self.status('Removing current build artifacts...')", "Twine...') os.system('twine upload dist/*') self.status('Publishing git tags...') os.system('git tag v{0}'.format(about['__version__']))", "@staticmethod def status(s): \"\"\"Prints things in green color.\"\"\" print('\\033[0;32m{0}\\033[0m'.format(s)) def", ":: OSI Approved :: MIT License\", \"Operating System :: OS", "# custom \"xmind\", \"flask\", \"arrow\", ] class PyPiCommand(Command): \"\"\" Build", "and Wheel (universal) distribution...') os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable)) self.status('Uploading", "custom \"xmind\", \"flask\", \"arrow\", ] class PyPiCommand(Command): \"\"\" Build and", "a tag. 
Support: python setup.py pypi Copied from requests_html \"\"\"", "'templates/*', 'schema.sql'], }, install_requires=install_requires, extras_require={}, python_requires='>=3.0, <4', # custom classifiers=[", "push --tags') try: self.status('Removing current build artifacts...') rmtree(os.path.join(here, 'dist')) rmtree(os.path.join(here,", "tags...') os.system('git tag v{0}'.format(about['__version__'])) os.system('git push --tags') try: self.status('Removing current", "OSError: pass self.status('Congratulations! Upload PyPi and publish git tag successfully...')", "Approved :: MIT License\", \"Operating System :: OS Independent\", ],", "custom package_data={ # custom '': ['README.md'], 'webtool': ['static/*', 'static/css/*', 'static/guide/*',", "#!/usr/env/bin python # -*- coding: utf-8 -*- import io import", "'console_scripts': [ 'xmind2testcase=xmind2testcase.cli:cli_main', ] }, cmdclass={ # python3 setup.py pypi", "def status(s): \"\"\"Prints things in green color.\"\"\" print('\\033[0;32m{0}\\033[0m'.format(s)) def initialize_options(self):", "import os import sys from shutil import rmtree from setuptools", "rmtree(os.path.join(here, 'xmind2testcase.egg-info')) # custom except OSError: pass self.status('Congratulations! Upload PyPi", "via Twine...') os.system('twine upload dist/*') self.status('Publishing git tags...') os.system('git tag", "to PyPi via Twine...') os.system('twine upload dist/*') self.status('Publishing git tags...')", "PyPi via Twine...') os.system('twine upload dist/*') self.status('Publishing git tags...') os.system('git", "try: self.status('Removing current build artifacts...') rmtree(os.path.join(here, 'dist')) rmtree(os.path.join(here, 'build')) rmtree(os.path.join(here,", "from shutil import rmtree from setuptools import setup, find_packages, Command", "pass self.status('Congratulations! Upload PyPi and publish git tag successfully...') sys.exit()", "status(s): \"\"\"Prints things in green color.\"\"\" print('\\033[0;32m{0}\\033[0m'.format(s)) def initialize_options(self): \"\"\"" ]
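# --- Pattern sketch (illustrative, not part of xmind2testcase): releasing is
# a single `python3 setup.py pypi` because PyPiCommand is wired into cmdclass.
# Any custom command follows the same four-member contract shown here;
# `HelloCommand` and the `hello` name are hypothetical.
from setuptools import Command


class HelloCommand(Command):
    """Toy command: `python setup.py hello` prints a greeting."""
    description = 'print a greeting'
    user_options = []  # no command-line options

    def initialize_options(self):
        pass  # set option defaults here

    def finalize_options(self):
        pass  # validate options here

    def run(self):
        print('hello from a custom setuptools command')

# Registered the same way as PyPiCommand above:
#     setup(..., cmdclass={'hello': HelloCommand})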
[ "103, 189), (197, 176, 213), (140, 86, 75), (196, 156,", "218, 229)] tableau20 = [(r/255., g/255., b/255.) for r,g,b, in", "plt.subplot(111) x = np.linspace(0, 7, 1000) y = np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))#*np.cos(x/0.05*(2*np.pi)) l,", "219, 141), (23, 190, 207), (158, 218, 229)] tableau20 =", "(127, 127, 127), (199, 199, 199), (188, 189, 34), (219,", "lw=1.1) #l.set_clip_on(0) plt.tick_params(which='both', top=False, right=False) plt.margins(0.01) ax.text(7, 1, r'$y(t)=\\exp\\left(-t/1.5\\right)\\cos(\\omega_1t)\\cos(\\omega_2t)$', fontsize=18,", "127, 14), (255, 187, 120), (44, 160, 44), (152, 223,", "229)] tableau20 = [(r/255., g/255., b/255.) for r,g,b, in tableau20]", "232), (255, 127, 14), (255, 187, 120), (44, 160, 44),", "= np.linspace(0, 7, 1000) y = np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))#*np.cos(x/0.05*(2*np.pi)) l, = plt.plot(x,", "import numpy as np tableau20 = [(31, 119, 180), (174,", "import matplotlib import matplotlib.pyplot as plt import numpy as np", "119, 194), (247, 182, 210), (127, 127, 127), (199, 199,", "(247, 182, 210), (127, 127, 127), (199, 199, 199), (188,", "{'xtick.direction': 'out', 'xtick.major.width': 1.5, 'xtick.minor.width': 1, 'xtick.major.size': 6, 'xtick.minor.size': 3,", "va='top', ha='right') #plt.title(\"Hallo\") plt.setp(plt.gca(), xlabel='Time [s]', ylabel='Amplitude') ax = plt.axes([0.57,", "39, 40), (255, 152, 150), (148, 103, 189), (197, 176,", "= plt.plot(x, y, lw=1.1) #l.set_clip_on(0) plt.tick_params(which='both', top=False, right=False) plt.margins(0.01) ax.text(7,", "plt.plot(x, np.exp(-x/1.5), lw=0.5, color='grey') l, = plt.plot(x, -np.exp(-x/1.5), lw=0.5, color='grey')", "out_ticks = {'xtick.direction': 'out', 'xtick.major.width': 1.5, 'xtick.minor.width': 1, 'xtick.major.size': 6,", "17 21:33:24 2015 @author: Tillsten \"\"\" import matplotlib import matplotlib.pyplot", "= plt.subplot(111) x = np.linspace(0, 7, 1000) y = np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))#*np.cos(x/0.05*(2*np.pi))", "= plt.plot(x, -np.exp(-x/1.5), lw=0.5, color='grey') l, = plt.plot(x, y, lw=1.1)", "0.3, .2]) #ax.plot(np.fft.fftfreq(x.size)[:y.size/2], abs(np.fft.fft(y))[:y.size/2]) ax.fill_between(np.fft.fftfreq(x.size, x[1]-x[0])[:y.size/2], abs(np.fft.fft(y))[:y.size/2], alpha=0.2, color='r') ax.set_xlim(0,", "199), (188, 189, 34), (219, 219, 141), (23, 190, 207),", "x[1]-x[0])[:y.size/2], abs(np.fft.fft(y))[:y.size/2], alpha=0.2, color='r') ax.set_xlim(0, 10) ax.set_xlabel(\"Frequency\") ax.xaxis.labelpad = 1", "top=False, right=False) plt.tick_params(which='minor', bottom=False, left=False) #plt.grid(1, axis='y', linestyle='-', alpha=0.3, lw=.5)", "= plt.plot(x, np.exp(-x/1.5), lw=0.5, color='grey') l, = plt.plot(x, -np.exp(-x/1.5), lw=0.5,", "(255, 187, 120), (44, 160, 44), (152, 223, 138), (214,", "tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127,", "-*- \"\"\" Created on Thu Sep 17 21:33:24 2015 @author:", "1, r'$y(t)=\\exp\\left(-t/1.5\\right)\\cos(\\omega_1t)\\cos(\\omega_2t)$', fontsize=18, va='top', ha='right') #plt.title(\"Hallo\") plt.setp(plt.gca(), xlabel='Time [s]', ylabel='Amplitude')", "'Vera Sans' out_ticks = {'xtick.direction': 'out', 'xtick.major.width': 1.5, 'xtick.minor.width': 1,", "'ytick.minor.visible': True, 'axes.spines.top': False, 'axes.spines.right': False, 'text.hinting': True, 'axes.titlesize': 'xx-large',", "#plt.rcParams['savefig.dpi'] = 110 #plt.rcParams['font.family'] = 'Vera Sans' out_ticks = {'xtick.direction':", "40), (255, 152, 150), (148, 103, 189), (197, 176, 213),", "'semibold', } plt.figure(figsize=(6,4)) with 
plt.style.context(out_ticks): ax = plt.subplot(111) x =", "plt import numpy as np tableau20 = [(31, 119, 180),", "'ytick.direction': 'out', 'ytick.major.width': 1.5, 'ytick.minor.width': 1, 'ytick.major.size': 6, 'ytick.minor.size': 3,", "(158, 218, 229)] tableau20 = [(r/255., g/255., b/255.) for r,g,b,", "as np tableau20 = [(31, 119, 180), (174, 199, 232),", "160, 44), (152, 223, 138), (214, 39, 40), (255, 152,", "#ax.plot(np.fft.fftfreq(x.size)[:y.size/2], abs(np.fft.fft(y))[:y.size/2]) ax.fill_between(np.fft.fftfreq(x.size, x[1]-x[0])[:y.size/2], abs(np.fft.fft(y))[:y.size/2], alpha=0.2, color='r') ax.set_xlim(0, 10) ax.set_xlabel(\"Frequency\")", "189), (197, 176, 213), (140, 86, 75), (196, 156, 148),", "\"\"\" import matplotlib import matplotlib.pyplot as plt import numpy as", "matplotlib.pyplot as plt import numpy as np tableau20 = [(31,", "(152, 223, 138), (214, 39, 40), (255, 152, 150), (148,", "199, 199), (188, 189, 34), (219, 219, 141), (23, 190,", "'text.hinting': True, 'axes.titlesize': 'xx-large', 'axes.titleweight': 'semibold', } plt.figure(figsize=(6,4)) with plt.style.context(out_ticks):", "np.linspace(0, 7, 1000) y = np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))#*np.cos(x/0.05*(2*np.pi)) l, = plt.plot(x, np.exp(-x/1.5),", "True, 'ytick.direction': 'out', 'ytick.major.width': 1.5, 'ytick.minor.width': 1, 'ytick.major.size': 6, 'ytick.minor.size':", "b/255.) for r,g,b, in tableau20] #plt.rcParams['savefig.dpi'] = 110 #plt.rcParams['font.family'] =", "138), (214, 39, 40), (255, 152, 150), (148, 103, 189),", "44), (152, 223, 138), (214, 39, 40), (255, 152, 150),", "np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))#*np.cos(x/0.05*(2*np.pi)) l, = plt.plot(x, np.exp(-x/1.5), lw=0.5, color='grey') l, = plt.plot(x,", "152, 150), (148, 103, 189), (197, 176, 213), (140, 86,", "= np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))#*np.cos(x/0.05*(2*np.pi)) l, = plt.plot(x, np.exp(-x/1.5), lw=0.5, color='grey') l, =", "right=False) plt.tick_params(which='minor', bottom=False, left=False) #plt.grid(1, axis='y', linestyle='-', alpha=0.3, lw=.5) plt.show()", "Created on Thu Sep 17 21:33:24 2015 @author: Tillsten \"\"\"", "3, 'xtick.minor.visible': True, 'ytick.direction': 'out', 'ytick.major.width': 1.5, 'ytick.minor.width': 1, 'ytick.major.size':", "(219, 219, 141), (23, 190, 207), (158, 218, 229)] tableau20", "176, 213), (140, 86, 75), (196, 156, 148), (227, 119,", "3, 'ytick.minor.visible': True, 'axes.spines.top': False, 'axes.spines.right': False, 'text.hinting': True, 'axes.titlesize':", "ax.xaxis.labelpad = 1 plt.locator_params(nbins=4) plt.tick_params(which='both', top=False, right=False) plt.tick_params(which='minor', bottom=False, left=False)", "ax.text(7, 1, r'$y(t)=\\exp\\left(-t/1.5\\right)\\cos(\\omega_1t)\\cos(\\omega_2t)$', fontsize=18, va='top', ha='right') #plt.title(\"Hallo\") plt.setp(plt.gca(), xlabel='Time [s]',", "plt.figure(figsize=(6,4)) with plt.style.context(out_ticks): ax = plt.subplot(111) x = np.linspace(0, 7,", "1 plt.locator_params(nbins=4) plt.tick_params(which='both', top=False, right=False) plt.tick_params(which='minor', bottom=False, left=False) #plt.grid(1, axis='y',", "'xx-large', 'axes.titleweight': 'semibold', } plt.figure(figsize=(6,4)) with plt.style.context(out_ticks): ax = plt.subplot(111)", "'ytick.major.size': 6, 'ytick.minor.size': 3, 'ytick.minor.visible': True, 'axes.spines.top': False, 'axes.spines.right': False,", "plt.plot(x, y, lw=1.1) #l.set_clip_on(0) plt.tick_params(which='both', top=False, right=False) plt.margins(0.01) ax.text(7, 1,", "119, 180), (174, 199, 232), 
(255, 127, 14), (255, 187,", "'ytick.minor.size': 3, 'ytick.minor.visible': True, 'axes.spines.top': False, 'axes.spines.right': False, 'text.hinting': True,", "1, 'xtick.major.size': 6, 'xtick.minor.size': 3, 'xtick.minor.visible': True, 'ytick.direction': 'out', 'ytick.major.width':", "r'$y(t)=\\exp\\left(-t/1.5\\right)\\cos(\\omega_1t)\\cos(\\omega_2t)$', fontsize=18, va='top', ha='right') #plt.title(\"Hallo\") plt.setp(plt.gca(), xlabel='Time [s]', ylabel='Amplitude') ax", "plt.style.context(out_ticks): ax = plt.subplot(111) x = np.linspace(0, 7, 1000) y", "2015 @author: Tillsten \"\"\" import matplotlib import matplotlib.pyplot as plt", "'axes.titlesize': 'xx-large', 'axes.titleweight': 'semibold', } plt.figure(figsize=(6,4)) with plt.style.context(out_ticks): ax =", "-*- coding: utf-8 -*- \"\"\" Created on Thu Sep 17", "[(r/255., g/255., b/255.) for r,g,b, in tableau20] #plt.rcParams['savefig.dpi'] = 110", "[(31, 119, 180), (174, 199, 232), (255, 127, 14), (255,", "(197, 176, 213), (140, 86, 75), (196, 156, 148), (227,", "coding: utf-8 -*- \"\"\" Created on Thu Sep 17 21:33:24", "= [(31, 119, 180), (174, 199, 232), (255, 127, 14),", "21:33:24 2015 @author: Tillsten \"\"\" import matplotlib import matplotlib.pyplot as", "np.exp(-x/1.5), lw=0.5, color='grey') l, = plt.plot(x, -np.exp(-x/1.5), lw=0.5, color='grey') l,", "lw=0.5, color='grey') l, = plt.plot(x, -np.exp(-x/1.5), lw=0.5, color='grey') l, =", "numpy as np tableau20 = [(31, 119, 180), (174, 199,", "1.5, 'ytick.minor.width': 1, 'ytick.major.size': 6, 'ytick.minor.size': 3, 'ytick.minor.visible': True, 'axes.spines.top':", "utf-8 -*- \"\"\" Created on Thu Sep 17 21:33:24 2015", "'axes.spines.top': False, 'axes.spines.right': False, 'text.hinting': True, 'axes.titlesize': 'xx-large', 'axes.titleweight': 'semibold',", "14), (255, 187, 120), (44, 160, 44), (152, 223, 138),", "<reponame>Tillsten/skultrafast # -*- coding: utf-8 -*- \"\"\" Created on Thu", "127, 127), (199, 199, 199), (188, 189, 34), (219, 219,", "plt.tick_params(which='both', top=False, right=False) plt.tick_params(which='minor', bottom=False, left=False) #plt.grid(1, axis='y', linestyle='-', alpha=0.3,", "75), (196, 156, 148), (227, 119, 194), (247, 182, 210),", "plt.locator_params(nbins=4) plt.tick_params(which='both', top=False, right=False) plt.tick_params(which='minor', bottom=False, left=False) #plt.grid(1, axis='y', linestyle='-',", "Sans' out_ticks = {'xtick.direction': 'out', 'xtick.major.width': 1.5, 'xtick.minor.width': 1, 'xtick.major.size':", "(214, 39, 40), (255, 152, 150), (148, 103, 189), (197,", "6, 'xtick.minor.size': 3, 'xtick.minor.visible': True, 'ytick.direction': 'out', 'ytick.major.width': 1.5, 'ytick.minor.width':", "right=False) plt.margins(0.01) ax.text(7, 1, r'$y(t)=\\exp\\left(-t/1.5\\right)\\cos(\\omega_1t)\\cos(\\omega_2t)$', fontsize=18, va='top', ha='right') #plt.title(\"Hallo\") plt.setp(plt.gca(),", "190, 207), (158, 218, 229)] tableau20 = [(r/255., g/255., b/255.)", "color='r') ax.set_xlim(0, 10) ax.set_xlabel(\"Frequency\") ax.xaxis.labelpad = 1 plt.locator_params(nbins=4) plt.tick_params(which='both', top=False,", "(255, 127, 14), (255, 187, 120), (44, 160, 44), (152,", "Tillsten \"\"\" import matplotlib import matplotlib.pyplot as plt import numpy", "tableau20] #plt.rcParams['savefig.dpi'] = 110 #plt.rcParams['font.family'] = 'Vera Sans' out_ticks =", "ax = plt.axes([0.57, 0.25, 0.3, .2]) #ax.plot(np.fft.fftfreq(x.size)[:y.size/2], abs(np.fft.fft(y))[:y.size/2]) ax.fill_between(np.fft.fftfreq(x.size, x[1]-x[0])[:y.size/2],", 
"\"\"\" Created on Thu Sep 17 21:33:24 2015 @author: Tillsten", "top=False, right=False) plt.margins(0.01) ax.text(7, 1, r'$y(t)=\\exp\\left(-t/1.5\\right)\\cos(\\omega_1t)\\cos(\\omega_2t)$', fontsize=18, va='top', ha='right') #plt.title(\"Hallo\")", "xlabel='Time [s]', ylabel='Amplitude') ax = plt.axes([0.57, 0.25, 0.3, .2]) #ax.plot(np.fft.fftfreq(x.size)[:y.size/2],", "ax.fill_between(np.fft.fftfreq(x.size, x[1]-x[0])[:y.size/2], abs(np.fft.fft(y))[:y.size/2], alpha=0.2, color='r') ax.set_xlim(0, 10) ax.set_xlabel(\"Frequency\") ax.xaxis.labelpad =", "6, 'ytick.minor.size': 3, 'ytick.minor.visible': True, 'axes.spines.top': False, 'axes.spines.right': False, 'text.hinting':", "= 110 #plt.rcParams['font.family'] = 'Vera Sans' out_ticks = {'xtick.direction': 'out',", "y, lw=1.1) #l.set_clip_on(0) plt.tick_params(which='both', top=False, right=False) plt.margins(0.01) ax.text(7, 1, r'$y(t)=\\exp\\left(-t/1.5\\right)\\cos(\\omega_1t)\\cos(\\omega_2t)$',", "= {'xtick.direction': 'out', 'xtick.major.width': 1.5, 'xtick.minor.width': 1, 'xtick.major.size': 6, 'xtick.minor.size':", "0.25, 0.3, .2]) #ax.plot(np.fft.fftfreq(x.size)[:y.size/2], abs(np.fft.fft(y))[:y.size/2]) ax.fill_between(np.fft.fftfreq(x.size, x[1]-x[0])[:y.size/2], abs(np.fft.fft(y))[:y.size/2], alpha=0.2, color='r')", "abs(np.fft.fft(y))[:y.size/2], alpha=0.2, color='r') ax.set_xlim(0, 10) ax.set_xlabel(\"Frequency\") ax.xaxis.labelpad = 1 plt.locator_params(nbins=4)", "= 1 plt.locator_params(nbins=4) plt.tick_params(which='both', top=False, right=False) plt.tick_params(which='minor', bottom=False, left=False) #plt.grid(1,", "(196, 156, 148), (227, 119, 194), (247, 182, 210), (127,", "l, = plt.plot(x, -np.exp(-x/1.5), lw=0.5, color='grey') l, = plt.plot(x, y,", "'axes.spines.right': False, 'text.hinting': True, 'axes.titlesize': 'xx-large', 'axes.titleweight': 'semibold', } plt.figure(figsize=(6,4))", "color='grey') l, = plt.plot(x, y, lw=1.1) #l.set_clip_on(0) plt.tick_params(which='both', top=False, right=False)", "141), (23, 190, 207), (158, 218, 229)] tableau20 = [(r/255.,", "1, 'ytick.major.size': 6, 'ytick.minor.size': 3, 'ytick.minor.visible': True, 'axes.spines.top': False, 'axes.spines.right':", "l, = plt.plot(x, y, lw=1.1) #l.set_clip_on(0) plt.tick_params(which='both', top=False, right=False) plt.margins(0.01)", "ax.set_xlim(0, 10) ax.set_xlabel(\"Frequency\") ax.xaxis.labelpad = 1 plt.locator_params(nbins=4) plt.tick_params(which='both', top=False, right=False)", "color='grey') l, = plt.plot(x, -np.exp(-x/1.5), lw=0.5, color='grey') l, = plt.plot(x,", "plt.setp(plt.gca(), xlabel='Time [s]', ylabel='Amplitude') ax = plt.axes([0.57, 0.25, 0.3, .2])", "34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]", "213), (140, 86, 75), (196, 156, 148), (227, 119, 194),", "199, 232), (255, 127, 14), (255, 187, 120), (44, 160,", "in tableau20] #plt.rcParams['savefig.dpi'] = 110 #plt.rcParams['font.family'] = 'Vera Sans' out_ticks", "110 #plt.rcParams['font.family'] = 'Vera Sans' out_ticks = {'xtick.direction': 'out', 'xtick.major.width':", "7, 1000) y = np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))#*np.cos(x/0.05*(2*np.pi)) l, = plt.plot(x, np.exp(-x/1.5), lw=0.5,", "1.5, 'xtick.minor.width': 1, 'xtick.major.size': 6, 'xtick.minor.size': 3, 'xtick.minor.visible': True, 'ytick.direction':", "import matplotlib.pyplot as plt import numpy as np tableau20 =", "127), (199, 199, 199), (188, 189, 34), (219, 219, 141),", "150), (148, 103, 189), (197, 176, 213), (140, 86, 75),", "matplotlib import matplotlib.pyplot as plt import numpy as np 
tableau20", "on Thu Sep 17 21:33:24 2015 @author: Tillsten \"\"\" import", "'xtick.major.size': 6, 'xtick.minor.size': 3, 'xtick.minor.visible': True, 'ytick.direction': 'out', 'ytick.major.width': 1.5,", "207), (158, 218, 229)] tableau20 = [(r/255., g/255., b/255.) for", "alpha=0.2, color='r') ax.set_xlim(0, 10) ax.set_xlabel(\"Frequency\") ax.xaxis.labelpad = 1 plt.locator_params(nbins=4) plt.tick_params(which='both',", "g/255., b/255.) for r,g,b, in tableau20] #plt.rcParams['savefig.dpi'] = 110 #plt.rcParams['font.family']", "fontsize=18, va='top', ha='right') #plt.title(\"Hallo\") plt.setp(plt.gca(), xlabel='Time [s]', ylabel='Amplitude') ax =", "as plt import numpy as np tableau20 = [(31, 119,", "187, 120), (44, 160, 44), (152, 223, 138), (214, 39,", "'xtick.minor.size': 3, 'xtick.minor.visible': True, 'ytick.direction': 'out', 'ytick.major.width': 1.5, 'ytick.minor.width': 1,", "ax = plt.subplot(111) x = np.linspace(0, 7, 1000) y =", "l, = plt.plot(x, np.exp(-x/1.5), lw=0.5, color='grey') l, = plt.plot(x, -np.exp(-x/1.5),", "y = np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))#*np.cos(x/0.05*(2*np.pi)) l, = plt.plot(x, np.exp(-x/1.5), lw=0.5, color='grey') l,", "lw=0.5, color='grey') l, = plt.plot(x, y, lw=1.1) #l.set_clip_on(0) plt.tick_params(which='both', top=False,", "-np.exp(-x/1.5), lw=0.5, color='grey') l, = plt.plot(x, y, lw=1.1) #l.set_clip_on(0) plt.tick_params(which='both',", "} plt.figure(figsize=(6,4)) with plt.style.context(out_ticks): ax = plt.subplot(111) x = np.linspace(0,", "156, 148), (227, 119, 194), (247, 182, 210), (127, 127,", "189, 34), (219, 219, 141), (23, 190, 207), (158, 218,", "(174, 199, 232), (255, 127, 14), (255, 187, 120), (44,", "with plt.style.context(out_ticks): ax = plt.subplot(111) x = np.linspace(0, 7, 1000)", "Sep 17 21:33:24 2015 @author: Tillsten \"\"\" import matplotlib import", "'xtick.minor.visible': True, 'ytick.direction': 'out', 'ytick.major.width': 1.5, 'ytick.minor.width': 1, 'ytick.major.size': 6,", "210), (127, 127, 127), (199, 199, 199), (188, 189, 34),", "223, 138), (214, 39, 40), (255, 152, 150), (148, 103,", "194), (247, 182, 210), (127, 127, 127), (199, 199, 199),", "plt.tick_params(which='both', top=False, right=False) plt.margins(0.01) ax.text(7, 1, r'$y(t)=\\exp\\left(-t/1.5\\right)\\cos(\\omega_1t)\\cos(\\omega_2t)$', fontsize=18, va='top', ha='right')", "True, 'axes.spines.top': False, 'axes.spines.right': False, 'text.hinting': True, 'axes.titlesize': 'xx-large', 'axes.titleweight':", "= plt.axes([0.57, 0.25, 0.3, .2]) #ax.plot(np.fft.fftfreq(x.size)[:y.size/2], abs(np.fft.fft(y))[:y.size/2]) ax.fill_between(np.fft.fftfreq(x.size, x[1]-x[0])[:y.size/2], abs(np.fft.fft(y))[:y.size/2],", "'axes.titleweight': 'semibold', } plt.figure(figsize=(6,4)) with plt.style.context(out_ticks): ax = plt.subplot(111) x", "10) ax.set_xlabel(\"Frequency\") ax.xaxis.labelpad = 1 plt.locator_params(nbins=4) plt.tick_params(which='both', top=False, right=False) plt.tick_params(which='minor',", "Thu Sep 17 21:33:24 2015 @author: Tillsten \"\"\" import matplotlib", "ylabel='Amplitude') ax = plt.axes([0.57, 0.25, 0.3, .2]) #ax.plot(np.fft.fftfreq(x.size)[:y.size/2], abs(np.fft.fft(y))[:y.size/2]) ax.fill_between(np.fft.fftfreq(x.size,", "'xtick.major.width': 1.5, 'xtick.minor.width': 1, 'xtick.major.size': 6, 'xtick.minor.size': 3, 'xtick.minor.visible': True,", "(255, 152, 150), (148, 103, 189), (197, 176, 213), (140,", "plt.margins(0.01) ax.text(7, 1, r'$y(t)=\\exp\\left(-t/1.5\\right)\\cos(\\omega_1t)\\cos(\\omega_2t)$', fontsize=18, va='top', 
ha='right') #plt.title(\"Hallo\") plt.setp(plt.gca(), xlabel='Time", "ha='right') #plt.title(\"Hallo\") plt.setp(plt.gca(), xlabel='Time [s]', ylabel='Amplitude') ax = plt.axes([0.57, 0.25,", "abs(np.fft.fft(y))[:y.size/2]) ax.fill_between(np.fft.fftfreq(x.size, x[1]-x[0])[:y.size/2], abs(np.fft.fft(y))[:y.size/2], alpha=0.2, color='r') ax.set_xlim(0, 10) ax.set_xlabel(\"Frequency\") ax.xaxis.labelpad", "(44, 160, 44), (152, 223, 138), (214, 39, 40), (255,", "86, 75), (196, 156, 148), (227, 119, 194), (247, 182,", "= [(r/255., g/255., b/255.) for r,g,b, in tableau20] #plt.rcParams['savefig.dpi'] =", "'out', 'ytick.major.width': 1.5, 'ytick.minor.width': 1, 'ytick.major.size': 6, 'ytick.minor.size': 3, 'ytick.minor.visible':", "1000) y = np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))#*np.cos(x/0.05*(2*np.pi)) l, = plt.plot(x, np.exp(-x/1.5), lw=0.5, color='grey')", ".2]) #ax.plot(np.fft.fftfreq(x.size)[:y.size/2], abs(np.fft.fft(y))[:y.size/2]) ax.fill_between(np.fft.fftfreq(x.size, x[1]-x[0])[:y.size/2], abs(np.fft.fft(y))[:y.size/2], alpha=0.2, color='r') ax.set_xlim(0, 10)", "(227, 119, 194), (247, 182, 210), (127, 127, 127), (199,", "(140, 86, 75), (196, 156, 148), (227, 119, 194), (247,", "@author: Tillsten \"\"\" import matplotlib import matplotlib.pyplot as plt import", "plt.axes([0.57, 0.25, 0.3, .2]) #ax.plot(np.fft.fftfreq(x.size)[:y.size/2], abs(np.fft.fft(y))[:y.size/2]) ax.fill_between(np.fft.fftfreq(x.size, x[1]-x[0])[:y.size/2], abs(np.fft.fft(y))[:y.size/2], alpha=0.2,", "False, 'text.hinting': True, 'axes.titlesize': 'xx-large', 'axes.titleweight': 'semibold', } plt.figure(figsize=(6,4)) with", "(188, 189, 34), (219, 219, 141), (23, 190, 207), (158,", "#plt.rcParams['font.family'] = 'Vera Sans' out_ticks = {'xtick.direction': 'out', 'xtick.major.width': 1.5,", "148), (227, 119, 194), (247, 182, 210), (127, 127, 127),", "(23, 190, 207), (158, 218, 229)] tableau20 = [(r/255., g/255.,", "[s]', ylabel='Amplitude') ax = plt.axes([0.57, 0.25, 0.3, .2]) #ax.plot(np.fft.fftfreq(x.size)[:y.size/2], abs(np.fft.fft(y))[:y.size/2])", "(148, 103, 189), (197, 176, 213), (140, 86, 75), (196,", "# -*- coding: utf-8 -*- \"\"\" Created on Thu Sep", "True, 'axes.titlesize': 'xx-large', 'axes.titleweight': 'semibold', } plt.figure(figsize=(6,4)) with plt.style.context(out_ticks): ax", "(199, 199, 199), (188, 189, 34), (219, 219, 141), (23,", "#plt.title(\"Hallo\") plt.setp(plt.gca(), xlabel='Time [s]', ylabel='Amplitude') ax = plt.axes([0.57, 0.25, 0.3,", "r,g,b, in tableau20] #plt.rcParams['savefig.dpi'] = 110 #plt.rcParams['font.family'] = 'Vera Sans'", "180), (174, 199, 232), (255, 127, 14), (255, 187, 120),", "np tableau20 = [(31, 119, 180), (174, 199, 232), (255,", "ax.set_xlabel(\"Frequency\") ax.xaxis.labelpad = 1 plt.locator_params(nbins=4) plt.tick_params(which='both', top=False, right=False) plt.tick_params(which='minor', bottom=False,", "for r,g,b, in tableau20] #plt.rcParams['savefig.dpi'] = 110 #plt.rcParams['font.family'] = 'Vera", "'xtick.minor.width': 1, 'xtick.major.size': 6, 'xtick.minor.size': 3, 'xtick.minor.visible': True, 'ytick.direction': 'out',", "tableau20 = [(r/255., g/255., b/255.) 
for r,g,b, in tableau20] #plt.rcParams['savefig.dpi']", "'ytick.minor.width': 1, 'ytick.major.size': 6, 'ytick.minor.size': 3, 'ytick.minor.visible': True, 'axes.spines.top': False,", "'ytick.major.width': 1.5, 'ytick.minor.width': 1, 'ytick.major.size': 6, 'ytick.minor.size': 3, 'ytick.minor.visible': True,", "#l.set_clip_on(0) plt.tick_params(which='both', top=False, right=False) plt.margins(0.01) ax.text(7, 1, r'$y(t)=\\exp\\left(-t/1.5\\right)\\cos(\\omega_1t)\\cos(\\omega_2t)$', fontsize=18, va='top',", "120), (44, 160, 44), (152, 223, 138), (214, 39, 40),", "182, 210), (127, 127, 127), (199, 199, 199), (188, 189,", "plt.plot(x, -np.exp(-x/1.5), lw=0.5, color='grey') l, = plt.plot(x, y, lw=1.1) #l.set_clip_on(0)", "'out', 'xtick.major.width': 1.5, 'xtick.minor.width': 1, 'xtick.major.size': 6, 'xtick.minor.size': 3, 'xtick.minor.visible':", "x = np.linspace(0, 7, 1000) y = np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))#*np.cos(x/0.05*(2*np.pi)) l, =", "False, 'axes.spines.right': False, 'text.hinting': True, 'axes.titlesize': 'xx-large', 'axes.titleweight': 'semibold', }", "= 'Vera Sans' out_ticks = {'xtick.direction': 'out', 'xtick.major.width': 1.5, 'xtick.minor.width':" ]
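# --- Alternative sketch for the inset spectrum (same x, y as above): numpy's
# real-FFT helpers return only the non-negative frequencies, which avoids the
# manual half-array slicing done with fftfreq/fft.
freqs = np.fft.rfftfreq(x.size, d=x[1] - x[0])  # one-sided frequency axis
amp = np.abs(np.fft.rfft(y))                    # matching one-sided amplitudes
# freqs and amp align element-for-element, so they can feed fill_between
# directly, e.g. ax.fill_between(freqs, amp, alpha=0.2, color='r').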
[ "this GithubBuildRunSource. The trigger that invoked the build run. :param", "oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators", "\"\"\" **[Required]** Gets the trigger_info of this GithubBuildRunSource. :return: The", "invoked the build run. :param trigger_id: The trigger_id of this", "``GITHUB`` and it should not be changed. The following keyword", "this class is ``GITHUB`` and it should not be changed.", "shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from .build_run_source", "(UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0", "this GithubBuildRunSource. :param trigger_info: The trigger_info of this GithubBuildRunSource. :type:", "= None self._trigger_info = None self._source_type = 'GITHUB' @property def", ":type: str \"\"\" self._trigger_id = trigger_id @property def trigger_info(self): \"\"\"", "def __init__(self, **kwargs): \"\"\" Initializes a new GithubBuildRunSource object with", "to the getters/setters of this class): :param source_type: The value", "__init__(self, **kwargs): \"\"\" Initializes a new GithubBuildRunSource object with values", "this property are: \"MANUAL\", \"GITHUB\", \"GITLAB\", \"DEVOPS_CODE_REPOSITORY\" :type source_type: str", "Allowed values for this property are: \"MANUAL\", \"GITHUB\", \"GITLAB\", \"DEVOPS_CODE_REPOSITORY\"", "of build run through GitHub. \"\"\" def __init__(self, **kwargs): \"\"\"", ":type trigger_info: oci.devops.models.TriggerInfo \"\"\" self.swagger_types = { 'source_type': 'str', 'trigger_id':", "that invoked the build run. :return: The trigger_id of this", ":return: The trigger_info of this GithubBuildRunSource. :rtype: oci.devops.models.TriggerInfo \"\"\" return", "this GithubBuildRunSource. :rtype: str \"\"\" return self._trigger_id @trigger_id.setter def trigger_id(self,", "= trigger_info def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if", "rights reserved. # This software is dual-licensed to you under", "assign to the trigger_id property of this GithubBuildRunSource. :type trigger_id:", "self._trigger_info = trigger_info def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other):", "init_model_state_from_kwargs @init_model_state_from_kwargs class GithubBuildRunSource(BuildRunSource): \"\"\" Specifies details of build run", "the build run. :param trigger_id: The trigger_id of this GithubBuildRunSource.", "'str', 'trigger_info': 'TriggerInfo' } self.attribute_map = { 'source_type': 'sourceType', 'trigger_id':", "as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown", "'trigger_info': 'TriggerInfo' } self.attribute_map = { 'source_type': 'sourceType', 'trigger_id': 'triggerId',", "= 'GITHUB' @property def trigger_id(self): \"\"\" **[Required]** Gets the trigger_id", "trigger_id of this GithubBuildRunSource. The trigger that invoked the build", "self.swagger_types = { 'source_type': 'str', 'trigger_id': 'str', 'trigger_info': 'TriggerInfo' }", "\"\"\" self._trigger_info = trigger_info def __repr__(self): return formatted_flat_dict(self) def __eq__(self,", "return self.__dict__ == other.__dict__ def __ne__(self, other): return not self", "source_type: The value to assign to the source_type property of", "trigger_info): \"\"\" Sets the trigger_info of this GithubBuildRunSource. :param trigger_info:", "return False return self.__dict__ == other.__dict__ def __ne__(self, other): return", "GithubBuildRunSource. 
:param trigger_info: The trigger_info of this GithubBuildRunSource. :type: oci.devops.models.TriggerInfo", "the trigger_info of this GithubBuildRunSource. :param trigger_info: The trigger_info of", "The trigger_id of this GithubBuildRunSource. :rtype: str \"\"\" return self._trigger_id", ":py:attr:`~oci.devops.models.GithubBuildRunSource.source_type` attribute of this class is ``GITHUB`` and it should", "and it should not be changed. The following keyword arguments", "property of this GithubBuildRunSource. Allowed values for this property are:", "shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at", "of this GithubBuildRunSource. Allowed values for this property are: \"MANUAL\",", "oci.devops.models.TriggerInfo \"\"\" self.swagger_types = { 'source_type': 'str', 'trigger_id': 'str', 'trigger_info':", "oci.devops.models.TriggerInfo \"\"\" self._trigger_info = trigger_info def __repr__(self): return formatted_flat_dict(self) def", "str :param trigger_id: The value to assign to the trigger_id", "is ``GITHUB`` and it should not be changed. The following", "= { 'source_type': 'sourceType', 'trigger_id': 'triggerId', 'trigger_info': 'triggerInfo' } self._source_type", "attribute of this class is ``GITHUB`` and it should not", "are supported (corresponding to the getters/setters of this class): :param", "GithubBuildRunSource. :type trigger_info: oci.devops.models.TriggerInfo \"\"\" self.swagger_types = { 'source_type': 'str',", "assign to the source_type property of this GithubBuildRunSource. Allowed values", "None self._trigger_info = None self._source_type = 'GITHUB' @property def trigger_id(self):", "The trigger_info of this GithubBuildRunSource. :rtype: oci.devops.models.TriggerInfo \"\"\" return self._trigger_info", "Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights", "= { 'source_type': 'str', 'trigger_id': 'str', 'trigger_info': 'TriggerInfo' } self.attribute_map", "\"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new GithubBuildRunSource object", "formatted_flat_dict(self) def __eq__(self, other): if other is None: return False", "import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import", "of this GithubBuildRunSource. :type trigger_id: str :param trigger_info: The value", "this GithubBuildRunSource. :type trigger_info: oci.devops.models.TriggerInfo \"\"\" self.swagger_types = { 'source_type':", "trigger_id): \"\"\" Sets the trigger_id of this GithubBuildRunSource. The trigger", "The trigger that invoked the build run. :return: The trigger_id", "oci.devops.models.TriggerInfo \"\"\" return self._trigger_info @trigger_info.setter def trigger_info(self, trigger_info): \"\"\" Sets", "value to assign to the trigger_info property of this GithubBuildRunSource.", "affiliates. All rights reserved. 
# This software is dual-licensed to", "'GITHUB' @property def trigger_id(self): \"\"\" **[Required]** Gets the trigger_id of", "'source_type': 'sourceType', 'trigger_id': 'triggerId', 'trigger_info': 'triggerInfo' } self._source_type = None", "\"GITLAB\", \"DEVOPS_CODE_REPOSITORY\" :type source_type: str :param trigger_id: The value to", "you under the Universal Permissive License (UPL) 1.0 as shown", "arguments are supported (corresponding to the getters/setters of this class):", "== other.__dict__ def __ne__(self, other): return not self == other", "@init_model_state_from_kwargs class GithubBuildRunSource(BuildRunSource): \"\"\" Specifies details of build run through", "of this GithubBuildRunSource. :type trigger_info: oci.devops.models.TriggerInfo \"\"\" self.swagger_types = {", "\"\"\" self._trigger_id = trigger_id @property def trigger_info(self): \"\"\" **[Required]** Gets", "software is dual-licensed to you under the Universal Permissive License", "Initializes a new GithubBuildRunSource object with values from keyword arguments.", "are: \"MANUAL\", \"GITHUB\", \"GITLAB\", \"DEVOPS_CODE_REPOSITORY\" :type source_type: str :param trigger_id:", "trigger_id of this GithubBuildRunSource. :rtype: str \"\"\" return self._trigger_id @trigger_id.setter", "'trigger_id': 'str', 'trigger_info': 'TriggerInfo' } self.attribute_map = { 'source_type': 'sourceType',", "} self._source_type = None self._trigger_id = None self._trigger_info = None", "= None self._source_type = 'GITHUB' @property def trigger_id(self): \"\"\" **[Required]**", "import BuildRunSource from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa:", "NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs", "trigger_id: str :param trigger_info: The value to assign to the", "value of the :py:attr:`~oci.devops.models.GithubBuildRunSource.source_type` attribute of this class is ``GITHUB``", "trigger_id of this GithubBuildRunSource. :type: str \"\"\" self._trigger_id = trigger_id", "of this GithubBuildRunSource. :return: The trigger_info of this GithubBuildRunSource. :rtype:", ":type trigger_id: str :param trigger_info: The value to assign to", "2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.", "to assign to the trigger_info property of this GithubBuildRunSource. :type", "source_type: str :param trigger_id: The value to assign to the", "new GithubBuildRunSource object with values from keyword arguments. The default", "build run through GitHub. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes", "of this GithubBuildRunSource. :param trigger_info: The trigger_info of this GithubBuildRunSource.", "utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates.", "Gets the trigger_info of this GithubBuildRunSource. :return: The trigger_info of", "GithubBuildRunSource. The trigger that invoked the build run. :return: The", "} self.attribute_map = { 'source_type': 'sourceType', 'trigger_id': 'triggerId', 'trigger_info': 'triggerInfo'", "return self._trigger_id @trigger_id.setter def trigger_id(self, trigger_id): \"\"\" Sets the trigger_id", "property are: \"MANUAL\", \"GITHUB\", \"GITLAB\", \"DEVOPS_CODE_REPOSITORY\" :type source_type: str :param", "this GithubBuildRunSource. :type: oci.devops.models.TriggerInfo \"\"\" self._trigger_info = trigger_info def __repr__(self):", "property of this GithubBuildRunSource. 
:type trigger_info: oci.devops.models.TriggerInfo \"\"\" self.swagger_types =", "\"MANUAL\", \"GITHUB\", \"GITLAB\", \"DEVOPS_CODE_REPOSITORY\" :type source_type: str :param trigger_id: The", "trigger_info def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other", "self._source_type = None self._trigger_id = None self._trigger_info = None self._source_type", "{ 'source_type': 'str', 'trigger_id': 'str', 'trigger_info': 'TriggerInfo' } self.attribute_map =", "GithubBuildRunSource. :type: str \"\"\" self._trigger_id = trigger_id @property def trigger_info(self):", "# This software is dual-licensed to you under the Universal", "GitHub. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new GithubBuildRunSource", "BuildRunSource from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401", "class is ``GITHUB`` and it should not be changed. The", "\"\"\" return self._trigger_id @trigger_id.setter def trigger_id(self, trigger_id): \"\"\" Sets the", "source_type property of this GithubBuildRunSource. Allowed values for this property", "= trigger_id @property def trigger_info(self): \"\"\" **[Required]** Gets the trigger_info", "to you under the Universal Permissive License (UPL) 1.0 as", "'sourceType', 'trigger_id': 'triggerId', 'trigger_info': 'triggerInfo' } self._source_type = None self._trigger_id", "Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache", "this GithubBuildRunSource. :return: The trigger_info of this GithubBuildRunSource. :rtype: oci.devops.models.TriggerInfo", "to assign to the trigger_id property of this GithubBuildRunSource. :type", "'source_type': 'str', 'trigger_id': 'str', 'trigger_info': 'TriggerInfo' } self.attribute_map = {", "trigger_info: oci.devops.models.TriggerInfo \"\"\" self.swagger_types = { 'source_type': 'str', 'trigger_id': 'str',", "\"\"\" self.swagger_types = { 'source_type': 'str', 'trigger_id': 'str', 'trigger_info': 'TriggerInfo'", "trigger_id(self): \"\"\" **[Required]** Gets the trigger_id of this GithubBuildRunSource. The", "of this class is ``GITHUB`` and it should not be", "The value to assign to the trigger_id property of this", "{ 'source_type': 'sourceType', 'trigger_id': 'triggerId', 'trigger_info': 'triggerInfo' } self._source_type =", "GithubBuildRunSource. :type trigger_id: str :param trigger_info: The value to assign", "to the trigger_id property of this GithubBuildRunSource. :type trigger_id: str", "self._trigger_id = trigger_id @property def trigger_info(self): \"\"\" **[Required]** Gets the", "def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is", "the source_type property of this GithubBuildRunSource. Allowed values for this", "\"\"\" **[Required]** Gets the trigger_id of this GithubBuildRunSource. The trigger", "'triggerId', 'trigger_info': 'triggerInfo' } self._source_type = None self._trigger_id = None", "# noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class GithubBuildRunSource(BuildRunSource):", "should not be changed. The following keyword arguments are supported", "trigger_id(self, trigger_id): \"\"\" Sets the trigger_id of this GithubBuildRunSource. The", ":type: oci.devops.models.TriggerInfo \"\"\" self._trigger_info = trigger_info def __repr__(self): return formatted_flat_dict(self)", "trigger_id property of this GithubBuildRunSource. 
:type trigger_id: str :param trigger_info:", "None self._trigger_id = None self._trigger_info = None self._source_type = 'GITHUB'", "The trigger that invoked the build run. :param trigger_id: The", "default value of the :py:attr:`~oci.devops.models.GithubBuildRunSource.source_type` attribute of this class is", "GithubBuildRunSource. :return: The trigger_info of this GithubBuildRunSource. :rtype: oci.devops.models.TriggerInfo \"\"\"", "and/or its affiliates. All rights reserved. # This software is", "build run. :return: The trigger_id of this GithubBuildRunSource. :rtype: str", "__eq__(self, other): if other is None: return False return self.__dict__", "its affiliates. All rights reserved. # This software is dual-licensed", "self.__dict__ == other.__dict__ def __ne__(self, other): return not self ==", "str :param trigger_info: The value to assign to the trigger_info", "'trigger_info': 'triggerInfo' } self._source_type = None self._trigger_id = None self._trigger_info", "License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either", "**[Required]** Gets the trigger_info of this GithubBuildRunSource. :return: The trigger_info", "def trigger_id(self, trigger_id): \"\"\" Sets the trigger_id of this GithubBuildRunSource.", "None self._source_type = 'GITHUB' @property def trigger_id(self): \"\"\" **[Required]** Gets", "value to assign to the trigger_id property of this GithubBuildRunSource.", "the getters/setters of this class): :param source_type: The value to", "not be changed. The following keyword arguments are supported (corresponding", "or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may", "details of build run through GitHub. \"\"\" def __init__(self, **kwargs):", "trigger_info of this GithubBuildRunSource. :param trigger_info: The trigger_info of this", "this class): :param source_type: The value to assign to the", "(c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.", "with values from keyword arguments. The default value of the", "other): if other is None: return False return self.__dict__ ==", "the build run. :return: The trigger_id of this GithubBuildRunSource. :rtype:", "@trigger_info.setter def trigger_info(self, trigger_info): \"\"\" Sets the trigger_info of this", "a new GithubBuildRunSource object with values from keyword arguments. The", ":param trigger_info: The trigger_info of this GithubBuildRunSource. :type: oci.devops.models.TriggerInfo \"\"\"", "the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl", "self._trigger_info @trigger_info.setter def trigger_info(self, trigger_info): \"\"\" Sets the trigger_info of", "The default value of the :py:attr:`~oci.devops.models.GithubBuildRunSource.source_type` attribute of this class", "def trigger_id(self): \"\"\" **[Required]** Gets the trigger_id of this GithubBuildRunSource.", "https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You", "http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from .build_run_source import BuildRunSource", "oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class GithubBuildRunSource(BuildRunSource): \"\"\" Specifies details of", "the trigger_info of this GithubBuildRunSource. :return: The trigger_info of this", "trigger_id: The trigger_id of this GithubBuildRunSource. :type: str \"\"\" self._trigger_id", "2016, 2021, Oracle and/or its affiliates. All rights reserved. 
#", "License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License", "trigger_info of this GithubBuildRunSource. :rtype: oci.devops.models.TriggerInfo \"\"\" return self._trigger_info @trigger_info.setter", "\"\"\" Sets the trigger_info of this GithubBuildRunSource. :param trigger_info: The", "from .build_run_source import BuildRunSource from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel", "property of this GithubBuildRunSource. :type trigger_id: str :param trigger_info: The", "Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose", "arguments. The default value of the :py:attr:`~oci.devops.models.GithubBuildRunSource.source_type` attribute of this", "trigger_info property of this GithubBuildRunSource. :type trigger_info: oci.devops.models.TriggerInfo \"\"\" self.swagger_types", "trigger that invoked the build run. :param trigger_id: The trigger_id", "= None self._trigger_id = None self._trigger_info = None self._source_type =", "You may choose either license. from .build_run_source import BuildRunSource from", "GithubBuildRunSource. :rtype: oci.devops.models.TriggerInfo \"\"\" return self._trigger_info @trigger_info.setter def trigger_info(self, trigger_info):", "The following keyword arguments are supported (corresponding to the getters/setters", "other is None: return False return self.__dict__ == other.__dict__ def", "dual-licensed to you under the Universal Permissive License (UPL) 1.0", ":param source_type: The value to assign to the source_type property", "for this property are: \"MANUAL\", \"GITHUB\", \"GITLAB\", \"DEVOPS_CODE_REPOSITORY\" :type source_type:", "invoked the build run. :return: The trigger_id of this GithubBuildRunSource.", "@trigger_id.setter def trigger_id(self, trigger_id): \"\"\" Sets the trigger_id of this", "The value to assign to the trigger_info property of this", "this GithubBuildRunSource. :rtype: oci.devops.models.TriggerInfo \"\"\" return self._trigger_info @trigger_info.setter def trigger_info(self,", "assign to the trigger_info property of this GithubBuildRunSource. :type trigger_info:", "choose either license. from .build_run_source import BuildRunSource from oci.util import", "GithubBuildRunSource object with values from keyword arguments. The default value", "the :py:attr:`~oci.devops.models.GithubBuildRunSource.source_type` attribute of this class is ``GITHUB`` and it", "False return self.__dict__ == other.__dict__ def __ne__(self, other): return not", "reserved. # This software is dual-licensed to you under the", "to the trigger_info property of this GithubBuildRunSource. :type trigger_info: oci.devops.models.TriggerInfo", "GithubBuildRunSource. :rtype: str \"\"\" return self._trigger_id @trigger_id.setter def trigger_id(self, trigger_id):", "trigger_info: The value to assign to the trigger_info property of", "return formatted_flat_dict(self) def __eq__(self, other): if other is None: return", "if other is None: return False return self.__dict__ == other.__dict__", "values for this property are: \"MANUAL\", \"GITHUB\", \"GITLAB\", \"DEVOPS_CODE_REPOSITORY\" :type", "GithubBuildRunSource(BuildRunSource): \"\"\" Specifies details of build run through GitHub. \"\"\"", "'triggerInfo' } self._source_type = None self._trigger_id = None self._trigger_info =", "GithubBuildRunSource. :type: oci.devops.models.TriggerInfo \"\"\" self._trigger_info = trigger_info def __repr__(self): return", "GithubBuildRunSource. The trigger that invoked the build run. 
:param trigger_id:", "run. :return: The trigger_id of this GithubBuildRunSource. :rtype: str \"\"\"", "# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All", "**[Required]** Gets the trigger_id of this GithubBuildRunSource. The trigger that", "values from keyword arguments. The default value of the :py:attr:`~oci.devops.models.GithubBuildRunSource.source_type`", "trigger_id: The value to assign to the trigger_id property of", "All rights reserved. # This software is dual-licensed to you", "this GithubBuildRunSource. Allowed values for this property are: \"MANUAL\", \"GITHUB\",", "trigger_info of this GithubBuildRunSource. :return: The trigger_info of this GithubBuildRunSource.", ":return: The trigger_id of this GithubBuildRunSource. :rtype: str \"\"\" return", "\"\"\" Sets the trigger_id of this GithubBuildRunSource. The trigger that", "run. :param trigger_id: The trigger_id of this GithubBuildRunSource. :type: str", "self._trigger_id = None self._trigger_info = None self._source_type = 'GITHUB' @property", "self._trigger_id @trigger_id.setter def trigger_id(self, trigger_id): \"\"\" Sets the trigger_id of", "from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from", "GithubBuildRunSource. Allowed values for this property are: \"MANUAL\", \"GITHUB\", \"GITLAB\",", "of this GithubBuildRunSource. :rtype: str \"\"\" return self._trigger_id @trigger_id.setter def", "that invoked the build run. :param trigger_id: The trigger_id of", "getters/setters of this class): :param source_type: The value to assign", "The trigger_id of this GithubBuildRunSource. :type: str \"\"\" self._trigger_id =", "keyword arguments are supported (corresponding to the getters/setters of this", "class): :param source_type: The value to assign to the source_type", "of this GithubBuildRunSource. The trigger that invoked the build run.", "may choose either license. from .build_run_source import BuildRunSource from oci.util", "through GitHub. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new", "trigger_id @property def trigger_info(self): \"\"\" **[Required]** Gets the trigger_info of", "# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or", "return self._trigger_info @trigger_info.setter def trigger_info(self, trigger_info): \"\"\" Sets the trigger_info", "2021, Oracle and/or its affiliates. All rights reserved. # This", "@property def trigger_info(self): \"\"\" **[Required]** Gets the trigger_info of this", "Sets the trigger_id of this GithubBuildRunSource. The trigger that invoked", "noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class GithubBuildRunSource(BuildRunSource): \"\"\"", "Sets the trigger_info of this GithubBuildRunSource. :param trigger_info: The trigger_info", "the trigger_info property of this GithubBuildRunSource. :type trigger_info: oci.devops.models.TriggerInfo \"\"\"", "Oracle and/or its affiliates. All rights reserved. # This software", "**kwargs): \"\"\" Initializes a new GithubBuildRunSource object with values from", "under the Universal Permissive License (UPL) 1.0 as shown at", "@property def trigger_id(self): \"\"\" **[Required]** Gets the trigger_id of this", "of the :py:attr:`~oci.devops.models.GithubBuildRunSource.source_type` attribute of this class is ``GITHUB`` and", "trigger that invoked the build run. 
:return: The trigger_id of", "def trigger_info(self): \"\"\" **[Required]** Gets the trigger_info of this GithubBuildRunSource.", "trigger_info: The trigger_info of this GithubBuildRunSource. :type: oci.devops.models.TriggerInfo \"\"\" self._trigger_info", "(corresponding to the getters/setters of this class): :param source_type: The", "be changed. The following keyword arguments are supported (corresponding to", "at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from .build_run_source import", "\"DEVOPS_CODE_REPOSITORY\" :type source_type: str :param trigger_id: The value to assign", "object with values from keyword arguments. The default value of", "str \"\"\" return self._trigger_id @trigger_id.setter def trigger_id(self, trigger_id): \"\"\" Sets", "None: return False return self.__dict__ == other.__dict__ def __ne__(self, other):", "class GithubBuildRunSource(BuildRunSource): \"\"\" Specifies details of build run through GitHub.", "supported (corresponding to the getters/setters of this class): :param source_type:", "following keyword arguments are supported (corresponding to the getters/setters of", "as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from", "The value to assign to the source_type property of this", "changed. The following keyword arguments are supported (corresponding to the", ":param trigger_id: The value to assign to the trigger_id property", "the trigger_id of this GithubBuildRunSource. The trigger that invoked the", "to the source_type property of this GithubBuildRunSource. Allowed values for", ":param trigger_info: The value to assign to the trigger_info property", "\"\"\" Specifies details of build run through GitHub. \"\"\" def", "This software is dual-licensed to you under the Universal Permissive", "def trigger_info(self, trigger_info): \"\"\" Sets the trigger_info of this GithubBuildRunSource.", "at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0.", "self._trigger_info = None self._source_type = 'GITHUB' @property def trigger_id(self): \"\"\"", "'trigger_id': 'triggerId', 'trigger_info': 'triggerInfo' } self._source_type = None self._trigger_id =", "1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as", "self.attribute_map = { 'source_type': 'sourceType', 'trigger_id': 'triggerId', 'trigger_info': 'triggerInfo' }", "Specifies details of build run through GitHub. \"\"\" def __init__(self,", "either license. from .build_run_source import BuildRunSource from oci.util import formatted_flat_dict,", "of this GithubBuildRunSource. :rtype: oci.devops.models.TriggerInfo \"\"\" return self._trigger_info @trigger_info.setter def", "Gets the trigger_id of this GithubBuildRunSource. The trigger that invoked", "is None: return False return self.__dict__ == other.__dict__ def __ne__(self,", "self._source_type = 'GITHUB' @property def trigger_id(self): \"\"\" **[Required]** Gets the", "\"\"\" Initializes a new GithubBuildRunSource object with values from keyword", "the trigger_id property of this GithubBuildRunSource. :type trigger_id: str :param", "this GithubBuildRunSource. :type trigger_id: str :param trigger_info: The value to", "is dual-licensed to you under the Universal Permissive License (UPL)", "\"GITHUB\", \"GITLAB\", \"DEVOPS_CODE_REPOSITORY\" :type source_type: str :param trigger_id: The value", "of this GithubBuildRunSource. 
:type: oci.devops.models.TriggerInfo \"\"\" self._trigger_info = trigger_info def", "formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs", "F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class GithubBuildRunSource(BuildRunSource): \"\"\" Specifies", "it should not be changed. The following keyword arguments are", "trigger_info(self): \"\"\" **[Required]** Gets the trigger_info of this GithubBuildRunSource. :return:", "__repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None:", "to assign to the source_type property of this GithubBuildRunSource. Allowed", "of this class): :param source_type: The value to assign to", "from keyword arguments. The default value of the :py:attr:`~oci.devops.models.GithubBuildRunSource.source_type` attribute", ":param trigger_id: The trigger_id of this GithubBuildRunSource. :type: str \"\"\"", "this GithubBuildRunSource. :type: str \"\"\" self._trigger_id = trigger_id @property def", "trigger_info of this GithubBuildRunSource. :type: oci.devops.models.TriggerInfo \"\"\" self._trigger_info = trigger_info", "trigger_info(self, trigger_info): \"\"\" Sets the trigger_info of this GithubBuildRunSource. :param", "this GithubBuildRunSource. The trigger that invoked the build run. :return:", "The trigger_info of this GithubBuildRunSource. :type: oci.devops.models.TriggerInfo \"\"\" self._trigger_info =", ":rtype: str \"\"\" return self._trigger_id @trigger_id.setter def trigger_id(self, trigger_id): \"\"\"", "'str', 'trigger_id': 'str', 'trigger_info': 'TriggerInfo' } self.attribute_map = { 'source_type':", "value to assign to the source_type property of this GithubBuildRunSource.", "Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or", "def __eq__(self, other): if other is None: return False return", "value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class", "str \"\"\" self._trigger_id = trigger_id @property def trigger_info(self): \"\"\" **[Required]**", "license. from .build_run_source import BuildRunSource from oci.util import formatted_flat_dict, NONE_SENTINEL,", "from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class GithubBuildRunSource(BuildRunSource): \"\"\" Specifies details", "of this GithubBuildRunSource. :type: str \"\"\" self._trigger_id = trigger_id @property", "coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its", ".build_run_source import BuildRunSource from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel #", ":rtype: oci.devops.models.TriggerInfo \"\"\" return self._trigger_info @trigger_info.setter def trigger_info(self, trigger_info): \"\"\"", "'TriggerInfo' } self.attribute_map = { 'source_type': 'sourceType', 'trigger_id': 'triggerId', 'trigger_info':", "build run. :param trigger_id: The trigger_id of this GithubBuildRunSource. :type:", "run through GitHub. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes a", ":type source_type: str :param trigger_id: The value to assign to", "keyword arguments. 
The default value of the :py:attr:`~oci.devops.models.GithubBuildRunSource.source_type` attribute of", "import init_model_state_from_kwargs @init_model_state_from_kwargs class GithubBuildRunSource(BuildRunSource): \"\"\" Specifies details of build", "\"\"\" return self._trigger_info @trigger_info.setter def trigger_info(self, trigger_info): \"\"\" Sets the" ]
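An illustrative usage sketch, not part of the SDK module above: it shows the keyword-argument constructor documented in the class docstring. It assumes the oci package is installed; the OCID string and the TriggerInfo keyword value are hypothetical placeholders, and the display_name field on TriggerInfo is an assumption rather than something stated in this file.

if __name__ == '__main__':
    from oci.devops.models import GithubBuildRunSource, TriggerInfo

    source = GithubBuildRunSource(
        trigger_id='ocid1.devopsbuildpipelinetrigger.oc1..exampleuniqueID',  # hypothetical OCID
        trigger_info=TriggerInfo(display_name='push-to-main'),  # display_name is assumed here
    )
    # source_type is fixed to 'GITHUB' by __init__ and should not be changed
    print(source.source_type)
    print(source)  # __repr__ delegates to formatted_flat_dict(self)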
[ "is_code('random_string') is None assert is_code('fleur.inpGUT') is None assert is_code(99999) is", "of expected code type.\\n\" \"Valid labels for a fleur.fleur executable", "'fleur 30', 'energy_units': 'eV', 'kmax': 4.2, 'fermi_energy': 0.0605833326, 'spin_density': 0.0792504665,", "seems to be now way to add outputs to CalcJobNode\")", "absolute_import import pytest import os # is_code def test_is_code_interface(fixture_code): from", "'energy_core_electrons': -2901.8120489845, 'magnetic_moment_units': 'muBohr', 'overall_charge_density': 0.0682602474, 'creator_target_structure': ' ', 'energy_valence_electrons':", "KpointsData) @pytest.mark.skip(reason=\"Test is not implemented\") def test_determine_favorable_reaction(): from aiida_fleur.tools.common_fleur_wf import", "[147.02734883720933], 'walltime_sec_cor': [43], 'total_cost': [834527.2320000001], 'fermi_energy': [0.0605833326], 'bandgap': [6.0662e-06], 'energy':", "'start_date': {'date': '2019/11/12', 'time': '16:11:25'}, 'parser_info': 'AiiDA Fleur Parser v0.2beta',", "'structure': structure} assert get_inputs_inpgen(**inputs) == returns @pytest.mark.skip(reason=\"Test is not implemented\")", "'code' is a Fleur code etc. ''' from aiida_fleur.tools.common_fleur_wf import", "of FleurinputgenCalculation to check if input types are correct i.e.", "per MPI 2. Number of k-points is 720'), (3, 24,", "'bandgap': [6.0662e-06], 'energy': [-138529.7052157], 'force_largest': [0.0], 'ncores': [12], 'pk': [node.pk],", "'description', 'settings': {'test': 1}, 'serial': False} results = get_inputs_fleur(**inputs) out_options", "= [ (4, 3, 8, 'Computational setup is perfect! Nodes:", "MPIs per node from 8 to 20 an OMP from", "correct i.e. 'code' is a Fleur code etc. ''' from", "20 an OMP from 3 to 1. Changed the number", "structure } assert get_inputs_inpgen(**inputs) == returns # repeat without a", "'' assert results['label'] == '' assert out_options == {'custom_scheduler_commands': 'test_command',", "2. 
Number of k-points is 720'), (3, 24, 1, 'WARNING:", "aiida_fleur.tools.common_fleur_wf import get_scheduler_extras # test_and_get_codenode def test_test_and_get_codenode_inpgen(fixture_code): from aiida_fleur.tools.common_fleur_wf import", "out_options == {'custom_scheduler_commands': 'test_command', 'withmpi': False, 'resources': {\"num_machines\": 1}} def", "import Dict code = fixture_code('fleur.inpgen') structure = generate_structure() params =", "[], 'orbital_magnetic_spin_down_charges': []}) out.store() node = generate_calc_job_node('fleur.fleur', fixture_localhost) node.store() out.add_incoming(node,", "import optimize_calc_options result = optimize_calc_options(*input) assert result == result_correct def", "True} assert out_settings == {'test': 1} inputs = {'code': 'code',", "work of FleurinputgenCalculation to check if input types are correct", "get_inputs_fleur from aiida.orm import Dict inputs = {'code': 'code', 'remote':", "for fleur.not_existing.\\n\" \"Configure at least one first using\\n\" \" verdi", "node.store() out.add_incoming(node, link_type=LinkType.CREATE, link_label='output_parameters') result = performance_extract_calcs([node.pk]) assert result ==", "Nodes: 4, MPIs per node 6, OMP per MPI 4.", "'WARNING: Changed the number of MPIs per node from 8", "4 to 3'), (4, 20, 1, 'WARNING: Changed the number", "aiida.orm import KpointsData a, b = get_kpoints_mesh_from_kdensity(generate_structure(), 0.1) assert a", "from aiida_fleur.tools.common_fleur_wf import performance_extract_calcs from aiida.common.links import LinkType from aiida.orm", "# @pytest.mark.skip(reason=\"There seems to be now way to add outputs", "implemented\") def test_determine_favorable_reaction(): from aiida_fleur.tools.common_fleur_wf import determine_favorable_reaction # @pytest.mark.skip(reason=\"There seems", "0.0, 'energy_hartree': -5090.8728101494, 'walltime_units': 'seconds', 'charge_density1': 0.0577674505, 'charge_density2': 0.0461840944, 'number_of_atoms':", "now way to add outputs to CalcJobNode\") def test_performance_extract_calcs(fixture_localhost, generate_calc_job_node):", "assert is_code('fleur.inpGUT') is None assert is_code(99999) is None code =", "{'custom_scheduler_commands': 'test_command'}, 'serial': True} results = get_inputs_fleur(**inputs) out_options = results['options'].get_dict()", "(4, 3, 8, 'Computational setup is perfect! 
Nodes: 4, MPIs", "from aiida.common.exceptions import NotExistent # install code setup code code", "'serial': True} results = get_inputs_fleur(**inputs) out_options = results['options'].get_dict() assert results['description']", "0.5), (4, 8, 3, False, 0.5, None, 720)] results_optimize =", "def test_determine_favorable_reaction(): from aiida_fleur.tools.common_fleur_wf import determine_favorable_reaction # @pytest.mark.skip(reason=\"There seems to", "= generate_work_chain_node('fleur.base_relax', fixture_localhost) node1.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node2.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node3.add_incoming(node_main,", "test_determine_favorable_reaction(): from aiida_fleur.tools.common_fleur_wf import determine_favorable_reaction # @pytest.mark.skip(reason=\"There seems to be", "'2019/11/12', 'time': '16:11:25'}, 'parser_info': 'AiiDA Fleur Parser v0.2beta', 'CalcJob_uuid': '3dc62d43-b607-4415-920f-e0d34e805711',", "'resources': [{'num_machines': 1, 'num_mpiprocs_per_machine': 1}]} inputs_optimize = [(4, 8, 3,", "'charge_density1': 0.0577674505, 'charge_density2': 0.0461840944, 'number_of_atoms': 4, 'parser_warnings': [], 'magnetic_moments': [3.3720063737,", "(3, 24, 1, 'WARNING: Changed the number of nodes from", "generate_work_chain_node('fleur.base_relax', fixture_localhost) node1.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node2.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node3.add_incoming(node_main, link_type=LinkType.CALL_CALC,", "{'date': '2019/11/12', 'time': '16:11:25'}, 'parser_info': 'AiiDA Fleur Parser v0.2beta', 'CalcJob_uuid':", "'walltime': 43, 'warnings': {'info': {}, 'debug': {}, 'error': {}, 'warning':", "assert str(msg.value) == (\"Given Code node is not of expected", "True, 100, None, 720), (4, 8, 3, True, 100, None,", "6, OMP per MPI 4. 
Number of k-points is 720'),", "@pytest.mark.parametrize('input,result_correct', zip(inputs_optimize, results_optimize)) def test_optimize_calc_options(input, result_correct): from aiida_fleur.tools.common_fleur_wf import optimize_calc_options", "code_fleur.label = 'fleur_test' code_fleur.store() expected = 'fleur.inpgen' nonexpected = 'fleur.fleur'", "[], 'density_convergence_units': 'me/bohr^3', 'number_of_spin_components': 2, 'charge_den_xc_den_integral': -223.295208608, 'magnetic_spin_down_charges': [5.777470284, 5.7775460208,", "([21, 21, 21], [0.0, 0.0, 0.0]) assert isinstance(b, KpointsData) @pytest.mark.skip(reason=\"Test", "'fleur_test' code_fleur.store() expected = 'fleur.inpgen' nonexpected = 'fleur.fleur' not_existing =", "generate_calc_job_node('fleur.fleur', fixture_localhost) node.store() out.add_incoming(node, link_type=LinkType.CREATE, link_label='output_parameters') result = performance_extract_calcs([node.pk]) assert", "Nodes: 4, MPIs per node 12, OMP per MPI 2.", "install code setup code code = fixture_code('fleur.inpgen') code_fleur = fixture_code('fleur.fleur')", "number of nodes from 4 to 3'), (4, 20, 1,", "aiida_fleur.tools.common_fleur_wf import optimize_calc_options result = optimize_calc_options(*input) assert result == result_correct", "from aiida.orm import Code from aiida.common.exceptions import NotExistent # install", "generate_calc_job_node('fleur.fleur', fixture_localhost) node3 = generate_calc_job_node('fleur.fleur', fixture_localhost) node_main = generate_work_chain_node('fleur.base_relax', fixture_localhost)", "node from 8 to 20 an OMP from 3 to", "30', 'energy_units': 'eV', 'kmax': 4.2, 'fermi_energy': 0.0605833326, 'spin_density': 0.0792504665, 'bandgap_units':", "assert isinstance(b, KpointsData) @pytest.mark.skip(reason=\"Test is not implemented\") def test_determine_favorable_reaction(): from", "0.0, 0.0]) assert isinstance(b, KpointsData) @pytest.mark.skip(reason=\"Test is not implemented\") def", "'number_of_atom_types': 4, 'number_of_iterations': 11, 'number_of_symmetries': 8, 'energy_core_electrons': -2901.8120489845, 'magnetic_moment_units': 'muBohr',", "per MPI 8. Number of k-points is 720'), (4, 6,", "for a fleur.fleur executable are:\\n\" \"* fleur_test@localhost-test\") with pytest.raises(ValueError) as", "test_test_and_get_codenode_inpgen(fixture_code): from aiida_fleur.tools.common_fleur_wf import test_and_get_codenode from aiida.orm import Code from", "'energy_units': 'eV', 'kmax': 4.2, 'fermi_energy': 0.0605833326, 'spin_density': 0.0792504665, 'bandgap_units': 'eV',", "way to add outputs to CalcJobNode\") def test_performance_extract_calcs(fixture_localhost, generate_calc_job_node): from", "out_options = results['options'].get_dict() assert results['description'] == '' assert results['label'] ==", "'energy_hartree_units': 'Htr', 'number_of_atom_types': 4, 'number_of_iterations': 11, 'number_of_symmetries': 8, 'energy_core_electrons': -2901.8120489845,", "6.0662e-06, 'end_date': {'date': '2019/11/12', 'time': '16:12:08'}, 'unparsed': [], 'walltime': 43,", "generate_calc_job_node, generate_work_chain_node): from aiida_fleur.tools.common_fleur_wf import find_last_in_restart from aiida.common.links import LinkType", "the work of FleurCalculation to check if input types are", "check if input types are correct i.e. 
'code' is a", "'magnetic_moment_units': 'muBohr', 'overall_charge_density': 0.0682602474, 'creator_target_structure': ' ', 'energy_valence_electrons': -71.6009296831, 'magnetic_spin_up_charges':", "== 'description' assert results['label'] == 'label' assert out_options == {'custom_scheduler_commands':", "'options': {'custom_scheduler_commands': 'test_command'}, 'label': 'label', 'description': 'description', 'settings': {'test': 1},", "generate_structure() params = Dict(dict={'test': 1}) inputs = {'structure': structure, 'inpgencode':", "fixture_localhost) node2 = generate_calc_job_node('fleur.fleur', fixture_localhost) node3 = generate_calc_job_node('fleur.fleur', fixture_localhost) node_main", "'eV', 'force_largest': 0.0, 'energy_hartree': -5090.8728101494, 'walltime_units': 'seconds', 'charge_density1': 0.0577674505, 'charge_density2':", "'settings': {'test': 1}, 'serial': False} results = get_inputs_fleur(**inputs) out_options =", "perfect! Nodes: 4, MPIs per node 12, OMP per MPI", "[43], 'walltime_sec_per_it': [3.909090909090909], 'n_iterations_total': [11], 'density_distance': [0.0682602474], 'computer': ['localhost-test'], 'n_atoms':", "None code = fixture_code('fleur.inpgen') code.store() assert is_code(code.uuid) assert is_code(code.pk) assert", "k-points is 720'), (4, 12, 2, 'Computational setup is perfect!", "from aiida.common.links import LinkType from aiida.orm import Dict out =", "description inputs = {'structure': structure, 'inpgencode': code, 'options': {}, 'params':", "8, 3, False, 0.5, None, 720)] results_optimize = [ (4,", "{'test': 1} inputs = {'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp',", "\" verdi code setup\") def test_get_kpoints_mesh_from_kdensity(generate_structure): from aiida_fleur.tools.common_fleur_wf import get_kpoints_mesh_from_kdensity", "'walltime_units': 'seconds', 'charge_density1': 0.0577674505, 'charge_density2': 0.0461840944, 'number_of_atoms': 4, 'parser_warnings': [],", "== returns # repeat without a label and description inputs", "per node 12, OMP per MPI 2. Number of k-points", "[3.3720063737, 3.3719345944, 3.3719329177, 3.3719329162], 'number_of_kpoints': 8, 'number_of_species': 1, 'fermi_energy_units': 'Htr',", "correctly. Note it is the work of FleurCalculation to check", "executable are:\\n\" \"* fleur_test@localhost-test\") with pytest.raises(ValueError) as msg: test_and_get_codenode(code, not_existing,", "results['parent_folder'] == 'remote' assert results['description'] == 'description' assert results['label'] ==", "import find_last_in_restart from aiida.common.links import LinkType node1 = generate_calc_job_node('fleur.fleur', fixture_localhost)", "import test_and_get_codenode from aiida.orm import Code from aiida.common.exceptions import NotExistent", "def test_get_inputs_fleur(): ''' Tests if get_inputs_fleur assembles inputs correctly. Note", "{'custom_scheduler_commands': 'test_command'}, 'label': 'label', 'description': 'description', 'settings': {'test': 1}, 'serial':", "number of nodes from 4 to 4. 
Number of k-points", "'inpgencode': code, 'options': {}, 'label': 'label', 'description': 'description', 'params': params}", "code = fixture_code('fleur.inpgen') code.store() assert is_code(code.uuid) assert is_code(code.pk) assert is_code('@'.join([code.label,", "results['settings'].get_dict() assert results['code'] == 'code' assert results['fleurinpdata'] == 'fleurinp' assert", "assert get_inputs_inpgen(**inputs) == returns @pytest.mark.skip(reason=\"Test is not implemented\") def test_get_scheduler_extras():", "msg: test_and_get_codenode(code, nonexpected, use_exceptions=True) assert str(msg.value) == (\"Given Code node", "= {'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp', 'options': {'custom_scheduler_commands': 'test_command'},", "21, 21], [0.0, 0.0, 0.0]) assert isinstance(b, KpointsData) @pytest.mark.skip(reason=\"Test is", "setup code code = fixture_code('fleur.inpgen') code_fleur = fixture_code('fleur.fleur') code_fleur.label =", "structure, 'inpgencode': code, 'options': {}, 'params': params} returns = {'metadata':", "is_code('fleur.inpGUT') is None assert is_code(99999) is None code = fixture_code('fleur.inpgen')", "'fermi_energy': [0.0605833326], 'bandgap': [6.0662e-06], 'energy': [-138529.7052157], 'force_largest': [0.0], 'ncores': [12],", "0.0]) assert isinstance(b, KpointsData) @pytest.mark.skip(reason=\"Test is not implemented\") def test_determine_favorable_reaction():", "results['label'] == '' assert out_options == {'custom_scheduler_commands': 'test_command', 'withmpi': False,", "0.0682602474, 'creator_target_structure': ' ', 'energy_valence_electrons': -71.6009296831, 'magnetic_spin_up_charges': [9.1494766577, 9.1494806151, 9.1494806833,", "{'custom_scheduler_commands': 'test_command', 'withmpi': False, 'resources': {\"num_machines\": 1}} def test_get_inputs_inpgen(fixture_code, generate_structure):", "link_label='output_parameters') result = performance_extract_calcs([node.pk]) assert result == {'n_symmetries': [8], 'n_spin_components':", "[11], 'density_distance': [0.0682602474], 'computer': ['localhost-test'], 'n_atoms': [4], 'kmax': [4.2], 'cost':", "[834527.2320000001], 'fermi_energy': [0.0605833326], 'bandgap': [6.0662e-06], 'energy': [-138529.7052157], 'force_largest': [0.0], 'ncores':", "True} results = get_inputs_fleur(**inputs) out_options = results['options'].get_dict() assert results['description'] ==", "'magnetic_spin_down_charges': [5.777470284, 5.7775460208, 5.7775477657, 5.7775477672], 'number_of_iterations_total': 11, 'creator_target_architecture': 'GEN', 'orbital_magnetic_moment_units':", "valid, and no valid codes for fleur.not_existing.\\n\" \"Configure at least", "test_get_kpoints_mesh_from_kdensity(generate_structure): from aiida_fleur.tools.common_fleur_wf import get_kpoints_mesh_from_kdensity from aiida.orm import KpointsData a,", "<filename>aiida_fleur/tests/tools/test_common_fleur_wf.py<gh_stars>0 from __future__ import absolute_import import pytest import os #", "'test_command'}, 'serial': True} results = get_inputs_fleur(**inputs) out_options = results['options'].get_dict() assert", "{}}, 'start_date': {'date': '2019/11/12', 'time': '16:11:25'}, 'parser_info': 'AiiDA Fleur Parser", "link_type=LinkType.CALL_CALC, link_label='CALL') node2.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node3.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node1.store() node2.store()", "{ 'options': {'withmpi': False, 'resources': {'num_machines': 1}}, 'description': '', 'label':", "= 'fleur.inpgen' 
nonexpected = 'fleur.fleur' not_existing = 'fleur.not_existing' assert isinstance(test_and_get_codenode(code,", "test_get_inputs_inpgen(fixture_code, generate_structure): ''' Tests if get_inputs_fleur assembles inputs correctly. Note", "'eV', 'kmax': 4.2, 'fermi_energy': 0.0605833326, 'spin_density': 0.0792504665, 'bandgap_units': 'eV', 'force_largest':", "720), (4, 8, 3, True, 100, None, 720, 0.5), (4,", "implemented\") def test_get_scheduler_extras(): from aiida_fleur.tools.common_fleur_wf import get_scheduler_extras # test_and_get_codenode def", "0.0461840944, 'number_of_atoms': 4, 'parser_warnings': [], 'magnetic_moments': [3.3720063737, 3.3719345944, 3.3719329177, 3.3719329162],", "'overall_charge_density': 0.0682602474, 'creator_target_structure': ' ', 'energy_valence_electrons': -71.6009296831, 'magnetic_spin_up_charges': [9.1494766577, 9.1494806151,", "'orbital_magnetic_moment_units': 'muBohr', 'orbital_magnetic_spin_up_charges': [], 'orbital_magnetic_spin_down_charges': []}) out.store() node = generate_calc_job_node('fleur.fleur',", "[2], 'n_kpoints': [8], 'n_iterations': [11], 'walltime_sec': [43], 'walltime_sec_per_it': [3.909090909090909], 'n_iterations_total':", "is_code(code.uuid) assert is_code(code.pk) assert is_code('@'.join([code.label, code.get_computer_name()])) assert is_code(code) def test_get_inputs_fleur():", "== {'custom_scheduler_commands': 'test_command', 'withmpi': True} assert out_settings == {'test': 1}", "inputs = {'structure': structure, 'inpgencode': code, 'options': {}, 'params': params}", "'warning': {}}, 'start_date': {'date': '2019/11/12', 'time': '16:11:25'}, 'parser_info': 'AiiDA Fleur", "[node.uuid], 'serial': [False], 'resources': [{'num_machines': 1, 'num_mpiprocs_per_machine': 1}]} inputs_optimize =", "of k-points is 720'), (3, 24, 1, 'WARNING: Changed the", "find_last_in_restart from aiida.common.links import LinkType node1 = generate_calc_job_node('fleur.fleur', fixture_localhost) node2", "aiida.common.links import LinkType node1 = generate_calc_job_node('fleur.fleur', fixture_localhost) node2 = generate_calc_job_node('fleur.fleur',", "import NotExistent # install code setup code code = fixture_code('fleur.inpgen')", "from aiida_fleur.tools.common_fleur_wf import get_scheduler_extras # test_and_get_codenode def test_test_and_get_codenode_inpgen(fixture_code): from aiida_fleur.tools.common_fleur_wf", "aiida_fleur.tools.common_fleur_wf import get_kpoints_mesh_from_kdensity from aiida.orm import KpointsData a, b =", "determine_favorable_reaction # @pytest.mark.skip(reason=\"There seems to be now way to add", "\"* fleur_test@localhost-test\") with pytest.raises(ValueError) as msg: test_and_get_codenode(code, not_existing, use_exceptions=True) assert", "fixture_code('fleur.inpgen') code_fleur = fixture_code('fleur.fleur') code_fleur.label = 'fleur_test' code_fleur.store() expected =", "assert out_options == {'custom_scheduler_commands': 'test_command', 'withmpi': False, 'resources': {\"num_machines\": 1}}", "get_inputs_inpgen from aiida.orm import Dict code = fixture_code('fleur.inpgen') structure =", "== '' assert results['label'] == '' assert out_options == {'custom_scheduler_commands':", "it is the work of FleurCalculation to check if input", "4, MPIs per node 6, OMP per MPI 4. Number", "MPI 2. 
Number of k-points is 720'), (3, 24, 1,", "9.1494806834], 'orbital_magnetic_moments': [], 'density_convergence_units': 'me/bohr^3', 'number_of_spin_components': 2, 'charge_den_xc_den_integral': -223.295208608, 'magnetic_spin_down_charges':", "[8], 'n_iterations': [11], 'walltime_sec': [43], 'walltime_sec_per_it': [3.909090909090909], 'n_iterations_total': [11], 'density_distance':", "[3.909090909090909], 'n_iterations_total': [11], 'density_distance': [0.0682602474], 'computer': ['localhost-test'], 'n_atoms': [4], 'kmax':", "performance_extract_calcs from aiida.common.links import LinkType from aiida.orm import Dict out", "k-points is 720'), (3, 24, 1, 'WARNING: Changed the number", "generate_calc_job_node): from aiida_fleur.tools.common_fleur_wf import performance_extract_calcs from aiida.common.links import LinkType from", "valid codes for fleur.not_existing.\\n\" \"Configure at least one first using\\n\"", "3, True, 2, None, 720), (4, 8, 3, True, 100,", "str(msg.value) == (\"Given Code node is not of expected code", "aiida.common.exceptions import NotExistent # install code setup code code =", "inputs correctly. Note it is the work of FleurinputgenCalculation to", "out_options = results['options'].get_dict() out_settings = results['settings'].get_dict() assert results['code'] == 'code'", "setup is perfect! Nodes: 4, MPIs per node 6, OMP", "code etc. ''' from aiida_fleur.tools.common_fleur_wf import get_inputs_fleur from aiida.orm import", "= optimize_calc_options(*input) assert result == result_correct def test_find_last_in_restart(fixture_localhost, generate_calc_job_node, generate_work_chain_node):", "per MPI 4. Number of k-points is 720'), (4, 12,", "it is the work of FleurinputgenCalculation to check if input", "9.1494806151, 9.1494806833, 9.1494806834], 'orbital_magnetic_moments': [], 'density_convergence_units': 'me/bohr^3', 'number_of_spin_components': 2, 'charge_den_xc_den_integral':", "fixture_code('fleur.inpgen') structure = generate_structure() params = Dict(dict={'test': 1}) inputs =", "outputs to CalcJobNode\") def test_performance_extract_calcs(fixture_localhost, generate_calc_job_node): from aiida_fleur.tools.common_fleur_wf import performance_extract_calcs", "FleurCalculation to check if input types are correct i.e. 'code'", "5.7775460208, 5.7775477657, 5.7775477672], 'number_of_iterations_total': 11, 'creator_target_architecture': 'GEN', 'orbital_magnetic_moment_units': 'muBohr', 'orbital_magnetic_spin_up_charges':", "4 to 4. Number of k-points is 720.')] @pytest.mark.parametrize('input,result_correct', zip(inputs_optimize,", "node3.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node1.store() node2.store() node3.store() result = find_last_in_restart(node_main) assert", "types are correct i.e. 
'code' is a Fleur code etc.", "{'withmpi': False, 'resources': {'num_machines': 1}}, 'description': 'description', 'label': 'label'}, 'code':", "node_main = generate_work_chain_node('fleur.base_relax', fixture_localhost) node1.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node2.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL')", "'num_mpiprocs_per_machine': 1}]} inputs_optimize = [(4, 8, 3, True, 0.5, None,", "'number_of_iterations_total': 11, 'creator_target_architecture': 'GEN', 'orbital_magnetic_moment_units': 'muBohr', 'orbital_magnetic_spin_up_charges': [], 'orbital_magnetic_spin_down_charges': []})", "= generate_calc_job_node('fleur.fleur', fixture_localhost) node.store() out.add_incoming(node, link_type=LinkType.CREATE, link_label='output_parameters') result = performance_extract_calcs([node.pk])", "\"Valid labels for a fleur.fleur executable are:\\n\" \"* fleur_test@localhost-test\") with", "== (\"Given Code node is not of expected code type.\\n\"", "assert is_code(code.uuid) assert is_code(code.pk) assert is_code('@'.join([code.label, code.get_computer_name()])) assert is_code(code) def", "to 4. Number of k-points is 720.')] @pytest.mark.parametrize('input,result_correct', zip(inputs_optimize, results_optimize))", "node3 = generate_calc_job_node('fleur.fleur', fixture_localhost) node_main = generate_work_chain_node('fleur.base_relax', fixture_localhost) node1.add_incoming(node_main, link_type=LinkType.CALL_CALC,", "input generator calculation with aiida', 'energy': -138529.7052157, 'bandgap': 6.0662e-06, 'end_date':", "'code', 'remote': 'remote', 'fleurinp': 'fleurinp', 'options': {'custom_scheduler_commands': 'test_command'}, 'serial': True}", "4, 'number_of_iterations': 11, 'number_of_symmetries': 8, 'energy_core_electrons': -2901.8120489845, 'magnetic_moment_units': 'muBohr', 'overall_charge_density':", "labels for a fleur.fleur executable are:\\n\" \"* fleur_test@localhost-test\") with pytest.raises(ValueError)", "'remote': 'remote', 'fleurinp': 'fleurinp', 'options': {'custom_scheduler_commands': 'test_command'}, 'label': 'label', 'description':", "def test_is_code_interface(fixture_code): from aiida_fleur.tools.common_fleur_wf import is_code assert is_code('random_string') is None", "returns @pytest.mark.skip(reason=\"Test is not implemented\") def test_get_scheduler_extras(): from aiida_fleur.tools.common_fleur_wf import", "{}, 'warning': {}}, 'start_date': {'date': '2019/11/12', 'time': '16:11:25'}, 'parser_info': 'AiiDA", "from 4 to 4. 
Number of k-points is 720.')] @pytest.mark.parametrize('input,result_correct',", "[-138529.7052157], 'force_largest': [0.0], 'ncores': [12], 'pk': [node.pk], 'uuid': [node.uuid], 'serial':", "is the work of FleurinputgenCalculation to check if input types", "not_existing, use_exceptions=True) assert str(msg.value) == (\"Code not valid, and no", "'walltime_sec_per_it': [3.909090909090909], 'n_iterations_total': [11], 'density_distance': [0.0682602474], 'computer': ['localhost-test'], 'n_atoms': [4],", "pytest.raises(ValueError) as msg: test_and_get_codenode(code, nonexpected, use_exceptions=True) assert str(msg.value) == (\"Given", "'fleurinp', 'options': {'custom_scheduler_commands': 'test_command'}, 'label': 'label', 'description': 'description', 'settings': {'test':", "without a label and description inputs = {'structure': structure, 'inpgencode':", "as msg: test_and_get_codenode(code, not_existing, use_exceptions=True) assert str(msg.value) == (\"Code not", "if input types are correct i.e. 'code' is a Fleur", "the work of FleurinputgenCalculation to check if input types are", "is_code(code.pk) assert is_code('@'.join([code.label, code.get_computer_name()])) assert is_code(code) def test_get_inputs_fleur(): ''' Tests", "to 3'), (4, 20, 1, 'WARNING: Changed the number of", "a Fleur code etc. ''' from aiida_fleur.tools.common_fleur_wf import get_inputs_fleur from", "a label and description inputs = {'structure': structure, 'inpgencode': code,", "'time': '16:12:08'}, 'unparsed': [], 'walltime': 43, 'warnings': {'info': {}, 'debug':", "the number of MPIs per node from 8 to 20", "'AiiDA Fleur Parser v0.2beta', 'CalcJob_uuid': '3dc62d43-b607-4415-920f-e0d34e805711', 'creator_name': 'fleur 30', 'energy_units':", "'parser_warnings': [], 'magnetic_moments': [3.3720063737, 3.3719345944, 3.3719329177, 3.3719329162], 'number_of_kpoints': 8, 'number_of_species':", "import KpointsData a, b = get_kpoints_mesh_from_kdensity(generate_structure(), 0.1) assert a ==", "assert out_options == {'custom_scheduler_commands': 'test_command', 'withmpi': True} assert out_settings ==", "'parser_info': 'AiiDA Fleur Parser v0.2beta', 'CalcJob_uuid': '3dc62d43-b607-4415-920f-e0d34e805711', 'creator_name': 'fleur 30',", "assembles inputs correctly. 
Note it is the work of FleurinputgenCalculation", "code setup code code = fixture_code('fleur.inpgen') code_fleur = fixture_code('fleur.fleur') code_fleur.label", "fixture_localhost) node.store() out.add_incoming(node, link_type=LinkType.CREATE, link_label='output_parameters') result = performance_extract_calcs([node.pk]) assert result", "is_code assert is_code('random_string') is None assert is_code('fleur.inpGUT') is None assert", "from aiida.orm import Dict inputs = {'code': 'code', 'remote': 'remote',", "assert is_code(code.pk) assert is_code('@'.join([code.label, code.get_computer_name()])) assert is_code(code) def test_get_inputs_fleur(): '''", "21], [0.0, 0.0, 0.0]) assert isinstance(b, KpointsData) @pytest.mark.skip(reason=\"Test is not", "params, 'structure': structure } assert get_inputs_inpgen(**inputs) == returns # repeat", "[75866.11200000001], 'costkonstant': [147.02734883720933], 'walltime_sec_cor': [43], 'total_cost': [834527.2320000001], 'fermi_energy': [0.0605833326], 'bandgap':", "msg: test_and_get_codenode(code, not_existing, use_exceptions=True) assert str(msg.value) == (\"Code not valid,", "assert results['label'] == 'label' assert out_options == {'custom_scheduler_commands': 'test_command', 'withmpi':", "nodes from 4 to 3'), (4, 20, 1, 'WARNING: Changed", "0.0577674505, 'charge_density2': 0.0461840944, 'number_of_atoms': 4, 'parser_warnings': [], 'magnetic_moments': [3.3720063737, 3.3719345944,", "0.0605833326, 'spin_density': 0.0792504665, 'bandgap_units': 'eV', 'force_largest': 0.0, 'energy_hartree': -5090.8728101494, 'walltime_units':", "'code': code, 'parameters': params, 'structure': structure } assert get_inputs_inpgen(**inputs) ==", "test_performance_extract_calcs(fixture_localhost, generate_calc_job_node): from aiida_fleur.tools.common_fleur_wf import performance_extract_calcs from aiida.common.links import LinkType", "'description', 'label': 'label'}, 'code': code, 'parameters': params, 'structure': structure }", "'n_spin_components': [2], 'n_kpoints': [8], 'n_iterations': [11], 'walltime_sec': [43], 'walltime_sec_per_it': [3.909090909090909],", "'16:11:25'}, 'parser_info': 'AiiDA Fleur Parser v0.2beta', 'CalcJob_uuid': '3dc62d43-b607-4415-920f-e0d34e805711', 'creator_name': 'fleur", "== 'fleurinp' assert results['parent_folder'] == 'remote' assert results['description'] == 'description'", "Dict inputs = {'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp', 'options':", "3, True, 0.5, None, 720), (4, 8, 3, True, 2,", "code_fleur = fixture_code('fleur.fleur') code_fleur.label = 'fleur_test' code_fleur.store() expected = 'fleur.inpgen'", "9.1494806833, 9.1494806834], 'orbital_magnetic_moments': [], 'density_convergence_units': 'me/bohr^3', 'number_of_spin_components': 2, 'charge_den_xc_den_integral': -223.295208608,", "720), (4, 8, 3, True, 100, None, 720), (4, 8,", "perfect! Nodes: 4, MPIs per node 3, OMP per MPI", "no valid codes for fleur.not_existing.\\n\" \"Configure at least one first", "4, 'Computational setup is perfect! 
Nodes: 4, MPIs per node", "'fleurinp', 'options': {'custom_scheduler_commands': 'test_command'}, 'serial': True} results = get_inputs_fleur(**inputs) out_options", "'fleur.not_existing' assert isinstance(test_and_get_codenode(code, expected), Code) with pytest.raises(ValueError) as msg: test_and_get_codenode(code,", "1, 'WARNING: Changed the number of nodes from 4 to", "False, 'resources': {'num_machines': 1}}, 'description': 'description', 'label': 'label'}, 'code': code,", "'description': 'description', 'settings': {'test': 1}, 'serial': False} results = get_inputs_fleur(**inputs)", "MPI 4. Number of k-points is 720'), (4, 12, 2,", "None assert is_code('fleur.inpGUT') is None assert is_code(99999) is None code", "node = generate_calc_job_node('fleur.fleur', fixture_localhost) node.store() out.add_incoming(node, link_type=LinkType.CREATE, link_label='output_parameters') result =", "'Computational setup is perfect! Nodes: 4, MPIs per node 12,", "get_inputs_inpgen(**inputs) == returns # repeat without a label and description", "{'info': {}, 'debug': {}, 'error': {}, 'warning': {}}, 'start_date': {'date':", "node1.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node2.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node3.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node1.store()", "''' from aiida_fleur.tools.common_fleur_wf import get_inputs_fleur from aiida.orm import Dict inputs", "'remote': 'remote', 'fleurinp': 'fleurinp', 'options': {'custom_scheduler_commands': 'test_command'}, 'serial': True} results", "returns # repeat without a label and description inputs =", "is not implemented\") def test_determine_favorable_reaction(): from aiida_fleur.tools.common_fleur_wf import determine_favorable_reaction #", "-138529.7052157, 'bandgap': 6.0662e-06, 'end_date': {'date': '2019/11/12', 'time': '16:12:08'}, 'unparsed': [],", "correctly. Note it is the work of FleurinputgenCalculation to check", "expected), Code) with pytest.raises(ValueError) as msg: test_and_get_codenode(code, nonexpected, use_exceptions=True) assert", "structure = generate_structure() params = Dict(dict={'test': 1}) inputs = {'structure':", "'charge_density2': 0.0461840944, 'number_of_atoms': 4, 'parser_warnings': [], 'magnetic_moments': [3.3720063737, 3.3719345944, 3.3719329177,", "[], 'walltime': 43, 'warnings': {'info': {}, 'debug': {}, 'error': {},", "type.\\n\" \"Valid labels for a fleur.fleur executable are:\\n\" \"* fleur_test@localhost-test\")", "use_exceptions=True) assert str(msg.value) == (\"Given Code node is not of", "3.3719329162], 'number_of_kpoints': 8, 'number_of_species': 1, 'fermi_energy_units': 'Htr', 'sum_of_eigenvalues': -2973.4129786677, 'output_file_version':", "'label': 'label'}, 'code': code, 'parameters': params, 'structure': structure } assert", "4, MPIs per node 12, OMP per MPI 2. Number", "assert is_code(99999) is None code = fixture_code('fleur.inpgen') code.store() assert is_code(code.uuid)", "import get_inputs_inpgen from aiida.orm import Dict code = fixture_code('fleur.inpgen') structure", "= fixture_code('fleur.inpgen') code_fleur = fixture_code('fleur.fleur') code_fleur.label = 'fleur_test' code_fleur.store() expected", "results = get_inputs_fleur(**inputs) out_options = results['options'].get_dict() out_settings = results['settings'].get_dict() assert", "is a Fleur code etc. ''' from aiida_fleur.tools.common_fleur_wf import get_inputs_fleur", "8. 
Number of k-points is 720'), (4, 6, 4, 'Computational", "is 720'), (4, 6, 4, 'Computational setup is perfect! Nodes:", "pytest.raises(ValueError) as msg: test_and_get_codenode(code, not_existing, use_exceptions=True) assert str(msg.value) == (\"Code", "code setup\") def test_get_kpoints_mesh_from_kdensity(generate_structure): from aiida_fleur.tools.common_fleur_wf import get_kpoints_mesh_from_kdensity from aiida.orm", "'remote', 'fleurinp': 'fleurinp', 'options': {'custom_scheduler_commands': 'test_command'}, 'serial': True} results =", "fleur_test@localhost-test\") with pytest.raises(ValueError) as msg: test_and_get_codenode(code, not_existing, use_exceptions=True) assert str(msg.value)", "inputs = {'structure': structure, 'inpgencode': code, 'options': {}, 'label': 'label',", "isinstance(b, KpointsData) @pytest.mark.skip(reason=\"Test is not implemented\") def test_determine_favorable_reaction(): from aiida_fleur.tools.common_fleur_wf", "== 'label' assert out_options == {'custom_scheduler_commands': 'test_command', 'withmpi': True} assert", "'code': code, 'parameters': params, 'structure': structure} assert get_inputs_inpgen(**inputs) == returns", "1}, 'serial': False} results = get_inputs_fleur(**inputs) out_options = results['options'].get_dict() out_settings", "Fleur code etc. ''' from aiida_fleur.tools.common_fleur_wf import get_inputs_fleur from aiida.orm", "'output_file_version': '0.27', 'energy_hartree_units': 'Htr', 'number_of_atom_types': 4, 'number_of_iterations': 11, 'number_of_symmetries': 8,", "'sum_of_eigenvalues': -2973.4129786677, 'output_file_version': '0.27', 'energy_hartree_units': 'Htr', 'number_of_atom_types': 4, 'number_of_iterations': 11,", "out.store() node = generate_calc_job_node('fleur.fleur', fixture_localhost) node.store() out.add_incoming(node, link_type=LinkType.CREATE, link_label='output_parameters') result", "is None assert is_code(99999) is None code = fixture_code('fleur.inpgen') code.store()", "'total_cost': [834527.2320000001], 'fermi_energy': [0.0605833326], 'bandgap': [6.0662e-06], 'energy': [-138529.7052157], 'force_largest': [0.0],", "2, None, 720), (4, 8, 3, True, 100, None, 720),", "2, 'charge_den_xc_den_integral': -223.295208608, 'magnetic_spin_down_charges': [5.777470284, 5.7775460208, 5.7775477657, 5.7775477672], 'number_of_iterations_total': 11,", "results['options'].get_dict() out_settings = results['settings'].get_dict() assert results['code'] == 'code' assert results['fleurinpdata']", "3.3719345944, 3.3719329177, 3.3719329162], 'number_of_kpoints': 8, 'number_of_species': 1, 'fermi_energy_units': 'Htr', 'sum_of_eigenvalues':", "1}]} inputs_optimize = [(4, 8, 3, True, 0.5, None, 720),", "OMP per MPI 4. 
Number of k-points is 720'), (4,", "setup\") def test_get_kpoints_mesh_from_kdensity(generate_structure): from aiida_fleur.tools.common_fleur_wf import get_kpoints_mesh_from_kdensity from aiida.orm import", "'serial': False} results = get_inputs_fleur(**inputs) out_options = results['options'].get_dict() out_settings =", "from aiida.orm import KpointsData a, b = get_kpoints_mesh_from_kdensity(generate_structure(), 0.1) assert", "fleur.not_existing.\\n\" \"Configure at least one first using\\n\" \" verdi code", "at least one first using\\n\" \" verdi code setup\") def", "one first using\\n\" \" verdi code setup\") def test_get_kpoints_mesh_from_kdensity(generate_structure): from", "= generate_calc_job_node('fleur.fleur', fixture_localhost) node3 = generate_calc_job_node('fleur.fleur', fixture_localhost) node_main = generate_work_chain_node('fleur.base_relax',", "'', 'label': ''}, 'code': code, 'parameters': params, 'structure': structure} assert", "100, None, 720), (4, 8, 3, True, 100, None, 720,", "pytest import os # is_code def test_is_code_interface(fixture_code): from aiida_fleur.tools.common_fleur_wf import", "'options': {'withmpi': False, 'resources': {'num_machines': 1}}, 'description': 'description', 'label': 'label'},", "'resources': {\"num_machines\": 1}} def test_get_inputs_inpgen(fixture_code, generate_structure): ''' Tests if get_inputs_fleur", "' ', 'energy_valence_electrons': -71.6009296831, 'magnetic_spin_up_charges': [9.1494766577, 9.1494806151, 9.1494806833, 9.1494806834], 'orbital_magnetic_moments':", "'end_date': {'date': '2019/11/12', 'time': '16:12:08'}, 'unparsed': [], 'walltime': 43, 'warnings':", "per node from 8 to 20 an OMP from 3", "is not implemented\") def test_get_scheduler_extras(): from aiida_fleur.tools.common_fleur_wf import get_scheduler_extras #", "# test_and_get_codenode def test_test_and_get_codenode_inpgen(fixture_code): from aiida_fleur.tools.common_fleur_wf import test_and_get_codenode from aiida.orm", "to be now way to add outputs to CalcJobNode\") def", "{'test': 1}, 'serial': False} results = get_inputs_fleur(**inputs) out_options = results['options'].get_dict()", "__future__ import absolute_import import pytest import os # is_code def", "per node 6, OMP per MPI 4. Number of k-points", "assert results['description'] == 'description' assert results['label'] == 'label' assert out_options", "'serial': [False], 'resources': [{'num_machines': 1, 'num_mpiprocs_per_machine': 1}]} inputs_optimize = [(4,", "720'), (4, 12, 2, 'Computational setup is perfect! Nodes: 4,", "4, MPIs per node 3, OMP per MPI 8. Number", "out.add_incoming(node, link_type=LinkType.CREATE, link_label='output_parameters') result = performance_extract_calcs([node.pk]) assert result == {'n_symmetries':", "from 4 to 3'), (4, 20, 1, 'WARNING: Changed the", "assert results['parent_folder'] == 'remote' assert results['description'] == 'description' assert results['label']", "with pytest.raises(ValueError) as msg: test_and_get_codenode(code, nonexpected, use_exceptions=True) assert str(msg.value) ==", "an OMP from 3 to 1. 
Changed the number of", "5.7775477672], 'number_of_iterations_total': 11, 'creator_target_architecture': 'GEN', 'orbital_magnetic_moment_units': 'muBohr', 'orbital_magnetic_spin_up_charges': [], 'orbital_magnetic_spin_down_charges':", "(4, 8, 3, False, 0.5, None, 720)] results_optimize = [", "1, 'WARNING: Changed the number of MPIs per node from", "1}}, 'description': '', 'label': ''}, 'code': code, 'parameters': params, 'structure':", "test_find_last_in_restart(fixture_localhost, generate_calc_job_node, generate_work_chain_node): from aiida_fleur.tools.common_fleur_wf import find_last_in_restart from aiida.common.links import", "'muBohr', 'orbital_magnetic_spin_up_charges': [], 'orbital_magnetic_spin_down_charges': []}) out.store() node = generate_calc_job_node('fleur.fleur', fixture_localhost)", "= [(4, 8, 3, True, 0.5, None, 720), (4, 8,", "first using\\n\" \" verdi code setup\") def test_get_kpoints_mesh_from_kdensity(generate_structure): from aiida_fleur.tools.common_fleur_wf", "Number of k-points is 720'), (4, 12, 2, 'Computational setup", "with pytest.raises(ValueError) as msg: test_and_get_codenode(code, not_existing, use_exceptions=True) assert str(msg.value) ==", "not implemented\") def test_get_scheduler_extras(): from aiida_fleur.tools.common_fleur_wf import get_scheduler_extras # test_and_get_codenode", "test_and_get_codenode(code, not_existing, use_exceptions=True) assert str(msg.value) == (\"Code not valid, and", "import determine_favorable_reaction # @pytest.mark.skip(reason=\"There seems to be now way to", "['localhost-test'], 'n_atoms': [4], 'kmax': [4.2], 'cost': [75866.11200000001], 'costkonstant': [147.02734883720933], 'walltime_sec_cor':", "out_options == {'custom_scheduler_commands': 'test_command', 'withmpi': True} assert out_settings == {'test':", "import get_scheduler_extras # test_and_get_codenode def test_test_and_get_codenode_inpgen(fixture_code): from aiida_fleur.tools.common_fleur_wf import test_and_get_codenode", "= results['settings'].get_dict() assert results['code'] == 'code' assert results['fleurinpdata'] == 'fleurinp'", "Number of k-points is 720'), (4, 6, 4, 'Computational setup", "from aiida_fleur.tools.common_fleur_wf import optimize_calc_options result = optimize_calc_options(*input) assert result ==", "aiida.orm import Dict inputs = {'code': 'code', 'remote': 'remote', 'fleurinp':", "8, 'energy_core_electrons': -2901.8120489845, 'magnetic_moment_units': 'muBohr', 'overall_charge_density': 0.0682602474, 'creator_target_structure': ' ',", "'code' assert results['fleurinpdata'] == 'fleurinp' assert results['parent_folder'] == 'remote' assert", "= {'structure': structure, 'inpgencode': code, 'options': {}, 'params': params} returns", "Parser v0.2beta', 'CalcJob_uuid': '3dc62d43-b607-4415-920f-e0d34e805711', 'creator_name': 'fleur 30', 'energy_units': 'eV', 'kmax':", "node1 = generate_calc_job_node('fleur.fleur', fixture_localhost) node2 = generate_calc_job_node('fleur.fleur', fixture_localhost) node3 =", "'force_largest': 0.0, 'energy_hartree': -5090.8728101494, 'walltime_units': 'seconds', 'charge_density1': 0.0577674505, 'charge_density2': 0.0461840944,", "node2 = generate_calc_job_node('fleur.fleur', fixture_localhost) node3 = generate_calc_job_node('fleur.fleur', fixture_localhost) node_main =", "are:\\n\" \"* fleur_test@localhost-test\") with pytest.raises(ValueError) as msg: test_and_get_codenode(code, not_existing, use_exceptions=True)", "8, 'Computational setup is perfect! 
Nodes: 4, MPIs per node", "'seconds', 'charge_density1': 0.0577674505, 'charge_density2': 0.0461840944, 'number_of_atoms': 4, 'parser_warnings': [], 'magnetic_moments':", "MPIs per node 12, OMP per MPI 2. Number of", "8, 3, True, 0.5, None, 720), (4, 8, 3, True,", "100, None, 720, 0.5), (4, 8, 3, False, 0.5, None,", "node 3, OMP per MPI 8. Number of k-points is", "12, 2, 'Computational setup is perfect! Nodes: 4, MPIs per", "(4, 20, 1, 'WARNING: Changed the number of MPIs per", "2, 'Computational setup is perfect! Nodes: 4, MPIs per node", "add outputs to CalcJobNode\") def test_performance_extract_calcs(fixture_localhost, generate_calc_job_node): from aiida_fleur.tools.common_fleur_wf import", "assert result == {'n_symmetries': [8], 'n_spin_components': [2], 'n_kpoints': [8], 'n_iterations':", "assert results['label'] == '' assert out_options == {'custom_scheduler_commands': 'test_command', 'withmpi':", "params = Dict(dict={'test': 1}) inputs = {'structure': structure, 'inpgencode': code,", "'density_distance': [0.0682602474], 'computer': ['localhost-test'], 'n_atoms': [4], 'kmax': [4.2], 'cost': [75866.11200000001],", "'options': {}, 'params': params} returns = {'metadata': { 'options': {'withmpi':", "isinstance(test_and_get_codenode(code, expected), Code) with pytest.raises(ValueError) as msg: test_and_get_codenode(code, nonexpected, use_exceptions=True)", "using\\n\" \" verdi code setup\") def test_get_kpoints_mesh_from_kdensity(generate_structure): from aiida_fleur.tools.common_fleur_wf import", "i.e. 'code' is a Fleur code etc. ''' from aiida_fleur.tools.common_fleur_wf", "def test_find_last_in_restart(fixture_localhost, generate_calc_job_node, generate_work_chain_node): from aiida_fleur.tools.common_fleur_wf import find_last_in_restart from aiida.common.links", "number of MPIs per node from 8 to 20 an", "assembles inputs correctly. Note it is the work of FleurCalculation", "= 'fleur.not_existing' assert isinstance(test_and_get_codenode(code, expected), Code) with pytest.raises(ValueError) as msg:", "'density_convergence_units': 'me/bohr^3', 'number_of_spin_components': 2, 'charge_den_xc_den_integral': -223.295208608, 'magnetic_spin_down_charges': [5.777470284, 5.7775460208, 5.7775477657,", "generate_calc_job_node('fleur.fleur', fixture_localhost) node_main = generate_work_chain_node('fleur.base_relax', fixture_localhost) node1.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node2.add_incoming(node_main,", "[11], 'walltime_sec': [43], 'walltime_sec_per_it': [3.909090909090909], 'n_iterations_total': [11], 'density_distance': [0.0682602474], 'computer':", "is perfect! Nodes: 4, MPIs per node 12, OMP per", "from 8 to 20 an OMP from 3 to 1.", "'orbital_magnetic_moments': [], 'density_convergence_units': 'me/bohr^3', 'number_of_spin_components': 2, 'charge_den_xc_den_integral': -223.295208608, 'magnetic_spin_down_charges': [5.777470284,", "'test_command'}, 'label': 'label', 'description': 'description', 'settings': {'test': 1}, 'serial': False}", "1} inputs = {'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp', 'options':", "'Computational setup is perfect! Nodes: 4, MPIs per node 3,", "node 12, OMP per MPI 2. 
Number of k-points is", "720), (4, 8, 3, True, 2, None, 720), (4, 8,", "'Htr', 'number_of_atom_types': 4, 'number_of_iterations': 11, 'number_of_symmetries': 8, 'energy_core_electrons': -2901.8120489845, 'magnetic_moment_units':", "aiida.orm import Dict out = Dict(dict={'title': 'A Fleur input generator", "(4, 8, 3, True, 100, None, 720), (4, 8, 3,", "structure} assert get_inputs_inpgen(**inputs) == returns @pytest.mark.skip(reason=\"Test is not implemented\") def", "MPI 8. Number of k-points is 720'), (4, 6, 4,", "-71.6009296831, 'magnetic_spin_up_charges': [9.1494766577, 9.1494806151, 9.1494806833, 9.1494806834], 'orbital_magnetic_moments': [], 'density_convergence_units': 'me/bohr^3',", "[12], 'pk': [node.pk], 'uuid': [node.uuid], 'serial': [False], 'resources': [{'num_machines': 1,", "KpointsData a, b = get_kpoints_mesh_from_kdensity(generate_structure(), 0.1) assert a == ([21,", "generator calculation with aiida', 'energy': -138529.7052157, 'bandgap': 6.0662e-06, 'end_date': {'date':", "aiida_fleur.tools.common_fleur_wf import get_inputs_fleur from aiida.orm import Dict inputs = {'code':", "'remote' assert results['description'] == 'description' assert results['label'] == 'label' assert", "code, 'options': {}, 'label': 'label', 'description': 'description', 'params': params} returns", "etc. ''' from aiida_fleur.tools.common_fleur_wf import get_inputs_inpgen from aiida.orm import Dict", "Dict(dict={'title': 'A Fleur input generator calculation with aiida', 'energy': -138529.7052157,", "True, 0.5, None, 720), (4, 8, 3, True, 2, None,", "'options': {'custom_scheduler_commands': 'test_command'}, 'serial': True} results = get_inputs_fleur(**inputs) out_options =", "'number_of_symmetries': 8, 'energy_core_electrons': -2901.8120489845, 'magnetic_moment_units': 'muBohr', 'overall_charge_density': 0.0682602474, 'creator_target_structure': '", "assert get_inputs_inpgen(**inputs) == returns # repeat without a label and", "', 'energy_valence_electrons': -71.6009296831, 'magnetic_spin_up_charges': [9.1494766577, 9.1494806151, 9.1494806833, 9.1494806834], 'orbital_magnetic_moments': [],", "= Dict(dict={'title': 'A Fleur input generator calculation with aiida', 'energy':", "[False], 'resources': [{'num_machines': 1, 'num_mpiprocs_per_machine': 1}]} inputs_optimize = [(4, 8,", "results = get_inputs_fleur(**inputs) out_options = results['options'].get_dict() assert results['description'] == ''", "{'metadata': { 'options': {'withmpi': False, 'resources': {'num_machines': 1}}, 'description': '',", "''' Tests if get_inputs_fleur assembles inputs correctly. Note it is", "== (\"Code not valid, and no valid codes for fleur.not_existing.\\n\"", "import get_inputs_fleur from aiida.orm import Dict inputs = {'code': 'code',", "720.')] @pytest.mark.parametrize('input,result_correct', zip(inputs_optimize, results_optimize)) def test_optimize_calc_options(input, result_correct): from aiida_fleur.tools.common_fleur_wf import", "fixture_code('fleur.fleur') code_fleur.label = 'fleur_test' code_fleur.store() expected = 'fleur.inpgen' nonexpected =", "perfect! Nodes: 4, MPIs per node 6, OMP per MPI", "'debug': {}, 'error': {}, 'warning': {}}, 'start_date': {'date': '2019/11/12', 'time':", "results_optimize = [ (4, 3, 8, 'Computational setup is perfect!", "fleur.fleur executable are:\\n\" \"* fleur_test@localhost-test\") with pytest.raises(ValueError) as msg: test_and_get_codenode(code,", "is 720'), (4, 12, 2, 'Computational setup is perfect! 
Nodes:", "0.0792504665, 'bandgap_units': 'eV', 'force_largest': 0.0, 'energy_hartree': -5090.8728101494, 'walltime_units': 'seconds', 'charge_density1':", "a == ([21, 21, 21], [0.0, 0.0, 0.0]) assert isinstance(b,", "Nodes: 4, MPIs per node 3, OMP per MPI 8.", "OMP from 3 to 1. Changed the number of nodes", "'params': params} returns = {'metadata': { 'options': {'withmpi': False, 'resources':", "@pytest.mark.skip(reason=\"There seems to be now way to add outputs to", "'structure': structure } assert get_inputs_inpgen(**inputs) == returns # repeat without", "= get_inputs_fleur(**inputs) out_options = results['options'].get_dict() assert results['description'] == '' assert", "'parameters': params, 'structure': structure } assert get_inputs_inpgen(**inputs) == returns #", "'withmpi': True} assert out_settings == {'test': 1} inputs = {'code':", "node2.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node3.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node1.store() node2.store() node3.store() result", "assert str(msg.value) == (\"Code not valid, and no valid codes", "0.5, None, 720)] results_optimize = [ (4, 3, 8, 'Computational", "'fleur.fleur' not_existing = 'fleur.not_existing' assert isinstance(test_and_get_codenode(code, expected), Code) with pytest.raises(ValueError)", "import get_kpoints_mesh_from_kdensity from aiida.orm import KpointsData a, b = get_kpoints_mesh_from_kdensity(generate_structure(),", "is_code(code) def test_get_inputs_fleur(): ''' Tests if get_inputs_fleur assembles inputs correctly.", "optimize_calc_options result = optimize_calc_options(*input) assert result == result_correct def test_find_last_in_restart(fixture_localhost,", "'charge_den_xc_den_integral': -223.295208608, 'magnetic_spin_down_charges': [5.777470284, 5.7775460208, 5.7775477657, 5.7775477672], 'number_of_iterations_total': 11, 'creator_target_architecture':", "is None assert is_code('fleur.inpGUT') is None assert is_code(99999) is None", "aiida_fleur.tools.common_fleur_wf import find_last_in_restart from aiida.common.links import LinkType node1 = generate_calc_job_node('fleur.fleur',", "test_is_code_interface(fixture_code): from aiida_fleur.tools.common_fleur_wf import is_code assert is_code('random_string') is None assert", "def test_get_scheduler_extras(): from aiida_fleur.tools.common_fleur_wf import get_scheduler_extras # test_and_get_codenode def test_test_and_get_codenode_inpgen(fixture_code):", "work of FleurCalculation to check if input types are correct", "input types are correct i.e. 'code' is a Fleur code", "4. Number of k-points is 720'), (4, 12, 2, 'Computational", "8, 'number_of_species': 1, 'fermi_energy_units': 'Htr', 'sum_of_eigenvalues': -2973.4129786677, 'output_file_version': '0.27', 'energy_hartree_units':", "None, 720, 0.5), (4, 8, 3, False, 0.5, None, 720)]", "test_and_get_codenode(code, nonexpected, use_exceptions=True) assert str(msg.value) == (\"Given Code node is", "not valid, and no valid codes for fleur.not_existing.\\n\" \"Configure at", "720'), (4, 6, 4, 'Computational setup is perfect! 
Nodes: 4,", "Changed the number of MPIs per node from 8 to", "== '' assert out_options == {'custom_scheduler_commands': 'test_command', 'withmpi': False, 'resources':", "def test_get_kpoints_mesh_from_kdensity(generate_structure): from aiida_fleur.tools.common_fleur_wf import get_kpoints_mesh_from_kdensity from aiida.orm import KpointsData", "calculation with aiida', 'energy': -138529.7052157, 'bandgap': 6.0662e-06, 'end_date': {'date': '2019/11/12',", "[8], 'n_spin_components': [2], 'n_kpoints': [8], 'n_iterations': [11], 'walltime_sec': [43], 'walltime_sec_per_it':", "label and description inputs = {'structure': structure, 'inpgencode': code, 'options':", "from aiida_fleur.tools.common_fleur_wf import test_and_get_codenode from aiida.orm import Code from aiida.common.exceptions", "of FleurCalculation to check if input types are correct i.e.", "= fixture_code('fleur.inpgen') code.store() assert is_code(code.uuid) assert is_code(code.pk) assert is_code('@'.join([code.label, code.get_computer_name()]))", "''' from aiida_fleur.tools.common_fleur_wf import get_inputs_inpgen from aiida.orm import Dict code", "Code) with pytest.raises(ValueError) as msg: test_and_get_codenode(code, nonexpected, use_exceptions=True) assert str(msg.value)", "import os # is_code def test_is_code_interface(fixture_code): from aiida_fleur.tools.common_fleur_wf import is_code", "== result_correct def test_find_last_in_restart(fixture_localhost, generate_calc_job_node, generate_work_chain_node): from aiida_fleur.tools.common_fleur_wf import find_last_in_restart", "returns = {'metadata': { 'options': {'withmpi': False, 'resources': {'num_machines': 1}},", "== {'custom_scheduler_commands': 'test_command', 'withmpi': False, 'resources': {\"num_machines\": 1}} def test_get_inputs_inpgen(fixture_code,", "from aiida_fleur.tools.common_fleur_wf import find_last_in_restart from aiida.common.links import LinkType node1 =", "[node.pk], 'uuid': [node.uuid], 'serial': [False], 'resources': [{'num_machines': 1, 'num_mpiprocs_per_machine': 1}]}", "'time': '16:11:25'}, 'parser_info': 'AiiDA Fleur Parser v0.2beta', 'CalcJob_uuid': '3dc62d43-b607-4415-920f-e0d34e805711', 'creator_name':", "= Dict(dict={'test': 1}) inputs = {'structure': structure, 'inpgencode': code, 'options':", "not implemented\") def test_determine_favorable_reaction(): from aiida_fleur.tools.common_fleur_wf import determine_favorable_reaction # @pytest.mark.skip(reason=\"There", "results['fleurinpdata'] == 'fleurinp' assert results['parent_folder'] == 'remote' assert results['description'] ==", "[0.0], 'ncores': [12], 'pk': [node.pk], 'uuid': [node.uuid], 'serial': [False], 'resources':", "'cost': [75866.11200000001], 'costkonstant': [147.02734883720933], 'walltime_sec_cor': [43], 'total_cost': [834527.2320000001], 'fermi_energy': [0.0605833326],", "assert is_code('@'.join([code.label, code.get_computer_name()])) assert is_code(code) def test_get_inputs_fleur(): ''' Tests if", "''}, 'code': code, 'parameters': params, 'structure': structure} assert get_inputs_inpgen(**inputs) ==", "'' assert out_options == {'custom_scheduler_commands': 'test_command', 'withmpi': False, 'resources': {\"num_machines\":", "= results['options'].get_dict() assert results['description'] == '' assert results['label'] == ''", "import performance_extract_calcs from aiida.common.links import LinkType from aiida.orm import Dict", "result = performance_extract_calcs([node.pk]) assert result == {'n_symmetries': [8], 'n_spin_components': [2],", "-2901.8120489845, 'magnetic_moment_units': 
'muBohr', 'overall_charge_density': 0.0682602474, 'creator_target_structure': ' ', 'energy_valence_electrons': -71.6009296831,", "Code from aiida.common.exceptions import NotExistent # install code setup code", "the number of nodes from 4 to 4. Number of", "fixture_localhost) node1.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node2.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node3.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL')", "'label' assert out_options == {'custom_scheduler_commands': 'test_command', 'withmpi': True} assert out_settings", "20, 1, 'WARNING: Changed the number of MPIs per node", "assert results['code'] == 'code' assert results['fleurinpdata'] == 'fleurinp' assert results['parent_folder']", "codes for fleur.not_existing.\\n\" \"Configure at least one first using\\n\" \"", "-2973.4129786677, 'output_file_version': '0.27', 'energy_hartree_units': 'Htr', 'number_of_atom_types': 4, 'number_of_iterations': 11, 'number_of_symmetries':", "'fermi_energy': 0.0605833326, 'spin_density': 0.0792504665, 'bandgap_units': 'eV', 'force_largest': 0.0, 'energy_hartree': -5090.8728101494,", "result == {'n_symmetries': [8], 'n_spin_components': [2], 'n_kpoints': [8], 'n_iterations': [11],", "'label', 'description': 'description', 'params': params} returns = {'metadata': { 'options':", "[]}) out.store() node = generate_calc_job_node('fleur.fleur', fixture_localhost) node.store() out.add_incoming(node, link_type=LinkType.CREATE, link_label='output_parameters')", "results['label'] == 'label' assert out_options == {'custom_scheduler_commands': 'test_command', 'withmpi': True}", "'number_of_iterations': 11, 'number_of_symmetries': 8, 'energy_core_electrons': -2901.8120489845, 'magnetic_moment_units': 'muBohr', 'overall_charge_density': 0.0682602474,", "of nodes from 4 to 4. Number of k-points is", "assert results['description'] == '' assert results['label'] == '' assert out_options", "[43], 'total_cost': [834527.2320000001], 'fermi_energy': [0.0605833326], 'bandgap': [6.0662e-06], 'energy': [-138529.7052157], 'force_largest':", "Tests if get_inputs_fleur assembles inputs correctly. 
Note it is the", "result_correct def test_find_last_in_restart(fixture_localhost, generate_calc_job_node, generate_work_chain_node): from aiida_fleur.tools.common_fleur_wf import find_last_in_restart from", "'remote', 'fleurinp': 'fleurinp', 'options': {'custom_scheduler_commands': 'test_command'}, 'label': 'label', 'description': 'description',", "import is_code assert is_code('random_string') is None assert is_code('fleur.inpGUT') is None", "= fixture_code('fleur.inpgen') structure = generate_structure() params = Dict(dict={'test': 1}) inputs", "{ 'options': {'withmpi': False, 'resources': {'num_machines': 1}}, 'description': 'description', 'label':", "24, 1, 'WARNING: Changed the number of nodes from 4", "test_and_get_codenode from aiida.orm import Code from aiida.common.exceptions import NotExistent #", "get_kpoints_mesh_from_kdensity(generate_structure(), 0.1) assert a == ([21, 21, 21], [0.0, 0.0,", "'pk': [node.pk], 'uuid': [node.uuid], 'serial': [False], 'resources': [{'num_machines': 1, 'num_mpiprocs_per_machine':", "get_inputs_fleur(**inputs) out_options = results['options'].get_dict() out_settings = results['settings'].get_dict() assert results['code'] ==", "the number of nodes from 4 to 3'), (4, 20,", "= get_kpoints_mesh_from_kdensity(generate_structure(), 0.1) assert a == ([21, 21, 21], [0.0,", "link_type=LinkType.CREATE, link_label='output_parameters') result = performance_extract_calcs([node.pk]) assert result == {'n_symmetries': [8],", "43, 'warnings': {'info': {}, 'debug': {}, 'error': {}, 'warning': {}},", "nodes from 4 to 4. Number of k-points is 720.')]", "None, 720), (4, 8, 3, True, 2, None, 720), (4,", "'magnetic_spin_up_charges': [9.1494766577, 9.1494806151, 9.1494806833, 9.1494806834], 'orbital_magnetic_moments': [], 'density_convergence_units': 'me/bohr^3', 'number_of_spin_components':", "is perfect! Nodes: 4, MPIs per node 3, OMP per", "results['options'].get_dict() assert results['description'] == '' assert results['label'] == '' assert", "= {'metadata': { 'options': {'withmpi': False, 'resources': {'num_machines': 1}}, 'description':", "is_code def test_is_code_interface(fixture_code): from aiida_fleur.tools.common_fleur_wf import is_code assert is_code('random_string') is", "'options': {}, 'label': 'label', 'description': 'description', 'params': params} returns =", "code = fixture_code('fleur.inpgen') code_fleur = fixture_code('fleur.fleur') code_fleur.label = 'fleur_test' code_fleur.store()", "'0.27', 'energy_hartree_units': 'Htr', 'number_of_atom_types': 4, 'number_of_iterations': 11, 'number_of_symmetries': 8, 'energy_core_electrons':", "results['description'] == '' assert results['label'] == '' assert out_options ==", "'creator_target_structure': ' ', 'energy_valence_electrons': -71.6009296831, 'magnetic_spin_up_charges': [9.1494766577, 9.1494806151, 9.1494806833, 9.1494806834],", "'Htr', 'sum_of_eigenvalues': -2973.4129786677, 'output_file_version': '0.27', 'energy_hartree_units': 'Htr', 'number_of_atom_types': 4, 'number_of_iterations':", "[4.2], 'cost': [75866.11200000001], 'costkonstant': [147.02734883720933], 'walltime_sec_cor': [43], 'total_cost': [834527.2320000001], 'fermi_energy':", "Changed the number of nodes from 4 to 4. 
Number", "k-points is 720.')] @pytest.mark.parametrize('input,result_correct', zip(inputs_optimize, results_optimize)) def test_optimize_calc_options(input, result_correct): from", "import LinkType node1 = generate_calc_job_node('fleur.fleur', fixture_localhost) node2 = generate_calc_job_node('fleur.fleur', fixture_localhost)", "'test_command', 'withmpi': False, 'resources': {\"num_machines\": 1}} def test_get_inputs_inpgen(fixture_code, generate_structure): '''", "get_inputs_fleur assembles inputs correctly. Note it is the work of", "'number_of_atoms': 4, 'parser_warnings': [], 'magnetic_moments': [3.3720063737, 3.3719345944, 3.3719329177, 3.3719329162], 'number_of_kpoints':", "etc. ''' from aiida_fleur.tools.common_fleur_wf import get_inputs_fleur from aiida.orm import Dict", "= get_inputs_fleur(**inputs) out_options = results['options'].get_dict() out_settings = results['settings'].get_dict() assert results['code']", "v0.2beta', 'CalcJob_uuid': '3dc62d43-b607-4415-920f-e0d34e805711', 'creator_name': 'fleur 30', 'energy_units': 'eV', 'kmax': 4.2,", "'number_of_kpoints': 8, 'number_of_species': 1, 'fermi_energy_units': 'Htr', 'sum_of_eigenvalues': -2973.4129786677, 'output_file_version': '0.27',", "(4, 6, 4, 'Computational setup is perfect! Nodes: 4, MPIs", "of k-points is 720'), (4, 12, 2, 'Computational setup is", "Number of k-points is 720'), (3, 24, 1, 'WARNING: Changed", "is perfect! Nodes: 4, MPIs per node 6, OMP per", "fixture_localhost) node_main = generate_work_chain_node('fleur.base_relax', fixture_localhost) node1.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node2.add_incoming(node_main, link_type=LinkType.CALL_CALC,", "FleurinputgenCalculation to check if input types are correct i.e. 'code'", "'inpgencode': code, 'options': {}, 'params': params} returns = {'metadata': {", "'label': 'label', 'description': 'description', 'settings': {'test': 1}, 'serial': False} results", "'resources': {'num_machines': 1}}, 'description': 'description', 'label': 'label'}, 'code': code, 'parameters':", "is a Fleur code etc. ''' from aiida_fleur.tools.common_fleur_wf import get_inputs_inpgen", "to check if input types are correct i.e. 'code' is", "setup is perfect! Nodes: 4, MPIs per node 3, OMP", "test_get_scheduler_extras(): from aiida_fleur.tools.common_fleur_wf import get_scheduler_extras # test_and_get_codenode def test_test_and_get_codenode_inpgen(fixture_code): from", "-5090.8728101494, 'walltime_units': 'seconds', 'charge_density1': 0.0577674505, 'charge_density2': 0.0461840944, 'number_of_atoms': 4, 'parser_warnings':", "None, 720), (4, 8, 3, True, 100, None, 720), (4,", "'description': '', 'label': ''}, 'code': code, 'parameters': params, 'structure': structure}", "0.1) assert a == ([21, 21, 21], [0.0, 0.0, 0.0])", "[0.0, 0.0, 0.0]) assert isinstance(b, KpointsData) @pytest.mark.skip(reason=\"Test is not implemented\")", "be now way to add outputs to CalcJobNode\") def test_performance_extract_calcs(fixture_localhost,", "performance_extract_calcs([node.pk]) assert result == {'n_symmetries': [8], 'n_spin_components': [2], 'n_kpoints': [8],", "-223.295208608, 'magnetic_spin_down_charges': [5.777470284, 5.7775460208, 5.7775477657, 5.7775477672], 'number_of_iterations_total': 11, 'creator_target_architecture': 'GEN',", "optimize_calc_options(*input) assert result == result_correct def test_find_last_in_restart(fixture_localhost, generate_calc_job_node, generate_work_chain_node): from", "MPIs per node 3, OMP per MPI 8. 
Number of", "link_label='CALL') node1.store() node2.store() node3.store() result = find_last_in_restart(node_main) assert result ==", "{\"num_machines\": 1}} def test_get_inputs_inpgen(fixture_code, generate_structure): ''' Tests if get_inputs_fleur assembles", "{}, 'debug': {}, 'error': {}, 'warning': {}}, 'start_date': {'date': '2019/11/12',", "'n_iterations_total': [11], 'density_distance': [0.0682602474], 'computer': ['localhost-test'], 'n_atoms': [4], 'kmax': [4.2],", "{}, 'label': 'label', 'description': 'description', 'params': params} returns = {'metadata':", "'walltime_sec': [43], 'walltime_sec_per_it': [3.909090909090909], 'n_iterations_total': [11], 'density_distance': [0.0682602474], 'computer': ['localhost-test'],", "code.get_computer_name()])) assert is_code(code) def test_get_inputs_fleur(): ''' Tests if get_inputs_fleur assembles", "is 720'), (3, 24, 1, 'WARNING: Changed the number of", "= performance_extract_calcs([node.pk]) assert result == {'n_symmetries': [8], 'n_spin_components': [2], 'n_kpoints':", "code type.\\n\" \"Valid labels for a fleur.fleur executable are:\\n\" \"*", "'fleurinp': 'fleurinp', 'options': {'custom_scheduler_commands': 'test_command'}, 'label': 'label', 'description': 'description', 'settings':", "'n_atoms': [4], 'kmax': [4.2], 'cost': [75866.11200000001], 'costkonstant': [147.02734883720933], 'walltime_sec_cor': [43],", "3 to 1. Changed the number of nodes from 4", "'number_of_species': 1, 'fermi_energy_units': 'Htr', 'sum_of_eigenvalues': -2973.4129786677, 'output_file_version': '0.27', 'energy_hartree_units': 'Htr',", "'parameters': params, 'structure': structure} assert get_inputs_inpgen(**inputs) == returns @pytest.mark.skip(reason=\"Test is", "== {'n_symmetries': [8], 'n_spin_components': [2], 'n_kpoints': [8], 'n_iterations': [11], 'walltime_sec':", "8, 3, True, 2, None, 720), (4, 8, 3, True,", "to CalcJobNode\") def test_performance_extract_calcs(fixture_localhost, generate_calc_job_node): from aiida_fleur.tools.common_fleur_wf import performance_extract_calcs from", "3, OMP per MPI 8. 
Number of k-points is 720'),", "[5.777470284, 5.7775460208, 5.7775477657, 5.7775477672], 'number_of_iterations_total': 11, 'creator_target_architecture': 'GEN', 'orbital_magnetic_moment_units': 'muBohr',", "'me/bohr^3', 'number_of_spin_components': 2, 'charge_den_xc_den_integral': -223.295208608, 'magnetic_spin_down_charges': [5.777470284, 5.7775460208, 5.7775477657, 5.7775477672],", "'withmpi': False, 'resources': {\"num_machines\": 1}} def test_get_inputs_inpgen(fixture_code, generate_structure): ''' Tests", "least one first using\\n\" \" verdi code setup\") def test_get_kpoints_mesh_from_kdensity(generate_structure):", "# install code setup code code = fixture_code('fleur.inpgen') code_fleur =", "get_kpoints_mesh_from_kdensity from aiida.orm import KpointsData a, b = get_kpoints_mesh_from_kdensity(generate_structure(), 0.1)", "from aiida.common.links import LinkType node1 = generate_calc_job_node('fleur.fleur', fixture_localhost) node2 =", "out_settings == {'test': 1} inputs = {'code': 'code', 'remote': 'remote',", "== returns @pytest.mark.skip(reason=\"Test is not implemented\") def test_get_scheduler_extras(): from aiida_fleur.tools.common_fleur_wf", "'fleur.inpgen' nonexpected = 'fleur.fleur' not_existing = 'fleur.not_existing' assert isinstance(test_and_get_codenode(code, expected),", "structure, 'inpgencode': code, 'options': {}, 'label': 'label', 'description': 'description', 'params':", "8, 3, True, 100, None, 720), (4, 8, 3, True,", "3, True, 100, None, 720, 0.5), (4, 8, 3, False,", "CalcJobNode\") def test_performance_extract_calcs(fixture_localhost, generate_calc_job_node): from aiida_fleur.tools.common_fleur_wf import performance_extract_calcs from aiida.common.links", "Changed the number of nodes from 4 to 3'), (4,", "fixture_code('fleur.inpgen') code.store() assert is_code(code.uuid) assert is_code(code.pk) assert is_code('@'.join([code.label, code.get_computer_name()])) assert", "720'), (3, 24, 1, 'WARNING: Changed the number of nodes", "'fleurinp': 'fleurinp', 'options': {'custom_scheduler_commands': 'test_command'}, 'serial': True} results = get_inputs_fleur(**inputs)", "assert out_settings == {'test': 1} inputs = {'code': 'code', 'remote':", "with aiida', 'energy': -138529.7052157, 'bandgap': 6.0662e-06, 'end_date': {'date': '2019/11/12', 'time':", "1. Changed the number of nodes from 4 to 4.", "a Fleur code etc. 
''' from aiida_fleur.tools.common_fleur_wf import get_inputs_inpgen from", "720, 0.5), (4, 8, 3, False, 0.5, None, 720)] results_optimize", "aiida.orm import Code from aiida.common.exceptions import NotExistent # install code", "== 'remote' assert results['description'] == 'description' assert results['label'] == 'label'", "from __future__ import absolute_import import pytest import os # is_code", "{'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp', 'options': {'custom_scheduler_commands': 'test_command'}, 'label':", "LinkType node1 = generate_calc_job_node('fleur.fleur', fixture_localhost) node2 = generate_calc_job_node('fleur.fleur', fixture_localhost) node3", "b = get_kpoints_mesh_from_kdensity(generate_structure(), 0.1) assert a == ([21, 21, 21],", "3'), (4, 20, 1, 'WARNING: Changed the number of MPIs", "is not of expected code type.\\n\" \"Valid labels for a", "import Dict out = Dict(dict={'title': 'A Fleur input generator calculation", "'energy': -138529.7052157, 'bandgap': 6.0662e-06, 'end_date': {'date': '2019/11/12', 'time': '16:12:08'}, 'unparsed':", "inputs = {'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp', 'options': {'custom_scheduler_commands':", "fixture_localhost) node3 = generate_calc_job_node('fleur.fleur', fixture_localhost) node_main = generate_work_chain_node('fleur.base_relax', fixture_localhost) node1.add_incoming(node_main,", "expected code type.\\n\" \"Valid labels for a fleur.fleur executable are:\\n\"", "'computer': ['localhost-test'], 'n_atoms': [4], 'kmax': [4.2], 'cost': [75866.11200000001], 'costkonstant': [147.02734883720933],", "'2019/11/12', 'time': '16:12:08'}, 'unparsed': [], 'walltime': 43, 'warnings': {'info': {},", "8 to 20 an OMP from 3 to 1. Changed", "assert isinstance(test_and_get_codenode(code, expected), Code) with pytest.raises(ValueError) as msg: test_and_get_codenode(code, nonexpected,", "'energy': [-138529.7052157], 'force_largest': [0.0], 'ncores': [12], 'pk': [node.pk], 'uuid': [node.uuid],", "11, 'number_of_symmetries': 8, 'energy_core_electrons': -2901.8120489845, 'magnetic_moment_units': 'muBohr', 'overall_charge_density': 0.0682602474, 'creator_target_structure':", "[9.1494766577, 9.1494806151, 9.1494806833, 9.1494806834], 'orbital_magnetic_moments': [], 'density_convergence_units': 'me/bohr^3', 'number_of_spin_components': 2,", "0.5, None, 720), (4, 8, 3, True, 2, None, 720),", "False} results = get_inputs_fleur(**inputs) out_options = results['options'].get_dict() out_settings = results['settings'].get_dict()", "from aiida_fleur.tools.common_fleur_wf import determine_favorable_reaction # @pytest.mark.skip(reason=\"There seems to be now", "OMP per MPI 8. Number of k-points is 720'), (4,", "'Computational setup is perfect! 
Nodes: 4, MPIs per node 6,", "None, 720)] results_optimize = [ (4, 3, 8, 'Computational setup", "generate_calc_job_node('fleur.fleur', fixture_localhost) node2 = generate_calc_job_node('fleur.fleur', fixture_localhost) node3 = generate_calc_job_node('fleur.fleur', fixture_localhost)", "Note it is the work of FleurinputgenCalculation to check if", "'3dc62d43-b607-4415-920f-e0d34e805711', 'creator_name': 'fleur 30', 'energy_units': 'eV', 'kmax': 4.2, 'fermi_energy': 0.0605833326,", "is 720.')] @pytest.mark.parametrize('input,result_correct', zip(inputs_optimize, results_optimize)) def test_optimize_calc_options(input, result_correct): from aiida_fleur.tools.common_fleur_wf", "False, 'resources': {'num_machines': 1}}, 'description': '', 'label': ''}, 'code': code,", "'label': 'label', 'description': 'description', 'params': params} returns = {'metadata': {", "{'withmpi': False, 'resources': {'num_machines': 1}}, 'description': '', 'label': ''}, 'code':", "and no valid codes for fleur.not_existing.\\n\" \"Configure at least one", "is None code = fixture_code('fleur.inpgen') code.store() assert is_code(code.uuid) assert is_code(code.pk)", "Code node is not of expected code type.\\n\" \"Valid labels", "{}, 'params': params} returns = {'metadata': { 'options': {'withmpi': False,", "code etc. ''' from aiida_fleur.tools.common_fleur_wf import get_inputs_inpgen from aiida.orm import", "'kmax': [4.2], 'cost': [75866.11200000001], 'costkonstant': [147.02734883720933], 'walltime_sec_cor': [43], 'total_cost': [834527.2320000001],", "{'date': '2019/11/12', 'time': '16:12:08'}, 'unparsed': [], 'walltime': 43, 'warnings': {'info':", "1}}, 'description': 'description', 'label': 'label'}, 'code': code, 'parameters': params, 'structure':", "= {'structure': structure, 'inpgencode': code, 'options': {}, 'label': 'label', 'description':", "node is not of expected code type.\\n\" \"Valid labels for", "'WARNING: Changed the number of nodes from 4 to 3'),", "'n_kpoints': [8], 'n_iterations': [11], 'walltime_sec': [43], 'walltime_sec_per_it': [3.909090909090909], 'n_iterations_total': [11],", "code = fixture_code('fleur.inpgen') structure = generate_structure() params = Dict(dict={'test': 1})", "get_scheduler_extras # test_and_get_codenode def test_test_and_get_codenode_inpgen(fixture_code): from aiida_fleur.tools.common_fleur_wf import test_and_get_codenode from", "from aiida_fleur.tools.common_fleur_wf import get_inputs_inpgen from aiida.orm import Dict code =", "'options': {'withmpi': False, 'resources': {'num_machines': 1}}, 'description': '', 'label': ''},", "expected = 'fleur.inpgen' nonexpected = 'fleur.fleur' not_existing = 'fleur.not_existing' assert", "nonexpected = 'fleur.fleur' not_existing = 'fleur.not_existing' assert isinstance(test_and_get_codenode(code, expected), Code)", "'fermi_energy_units': 'Htr', 'sum_of_eigenvalues': -2973.4129786677, 'output_file_version': '0.27', 'energy_hartree_units': 'Htr', 'number_of_atom_types': 4,", "Fleur code etc. ''' from aiida_fleur.tools.common_fleur_wf import get_inputs_inpgen from aiida.orm", "to 20 an OMP from 3 to 1. 
Changed the", "assert a == ([21, 21, 21], [0.0, 0.0, 0.0]) assert", "is_code(99999) is None code = fixture_code('fleur.inpgen') code.store() assert is_code(code.uuid) assert", "{'metadata': { 'options': {'withmpi': False, 'resources': {'num_machines': 1}}, 'description': 'description',", "Number of k-points is 720.')] @pytest.mark.parametrize('input,result_correct', zip(inputs_optimize, results_optimize)) def test_optimize_calc_options(input,", "[0.0682602474], 'computer': ['localhost-test'], 'n_atoms': [4], 'kmax': [4.2], 'cost': [75866.11200000001], 'costkonstant':", "[ (4, 3, 8, 'Computational setup is perfect! Nodes: 4,", "'description', 'params': params} returns = {'metadata': { 'options': {'withmpi': False,", "11, 'creator_target_architecture': 'GEN', 'orbital_magnetic_moment_units': 'muBohr', 'orbital_magnetic_spin_up_charges': [], 'orbital_magnetic_spin_down_charges': []}) out.store()", "results_optimize)) def test_optimize_calc_options(input, result_correct): from aiida_fleur.tools.common_fleur_wf import optimize_calc_options result =", "assert results['fleurinpdata'] == 'fleurinp' assert results['parent_folder'] == 'remote' assert results['description']", "'magnetic_moments': [3.3720063737, 3.3719345944, 3.3719329177, 3.3719329162], 'number_of_kpoints': 8, 'number_of_species': 1, 'fermi_energy_units':", "'orbital_magnetic_spin_up_charges': [], 'orbital_magnetic_spin_down_charges': []}) out.store() node = generate_calc_job_node('fleur.fleur', fixture_localhost) node.store()", "== ([21, 21, 21], [0.0, 0.0, 0.0]) assert isinstance(b, KpointsData)", "aiida.orm import Dict code = fixture_code('fleur.inpgen') structure = generate_structure() params", "3, 8, 'Computational setup is perfect! Nodes: 4, MPIs per", "'force_largest': [0.0], 'ncores': [12], 'pk': [node.pk], 'uuid': [node.uuid], 'serial': [False],", "are correct i.e. 'code' is a Fleur code etc. 
'''", "'muBohr', 'overall_charge_density': 0.0682602474, 'creator_target_structure': ' ', 'energy_valence_electrons': -71.6009296831, 'magnetic_spin_up_charges': [9.1494766577,", "'kmax': 4.2, 'fermi_energy': 0.0605833326, 'spin_density': 0.0792504665, 'bandgap_units': 'eV', 'force_largest': 0.0,", "'bandgap_units': 'eV', 'force_largest': 0.0, 'energy_hartree': -5090.8728101494, 'walltime_units': 'seconds', 'charge_density1': 0.0577674505,", "get_inputs_inpgen(**inputs) == returns @pytest.mark.skip(reason=\"Test is not implemented\") def test_get_scheduler_extras(): from", "test_and_get_codenode def test_test_and_get_codenode_inpgen(fixture_code): from aiida_fleur.tools.common_fleur_wf import test_and_get_codenode from aiida.orm import", "out_settings = results['settings'].get_dict() assert results['code'] == 'code' assert results['fleurinpdata'] ==", "'label'}, 'code': code, 'parameters': params, 'structure': structure } assert get_inputs_inpgen(**inputs)", "False, 'resources': {\"num_machines\": 1}} def test_get_inputs_inpgen(fixture_code, generate_structure): ''' Tests if", "Dict code = fixture_code('fleur.inpgen') structure = generate_structure() params = Dict(dict={'test':", "'description': 'description', 'params': params} returns = {'metadata': { 'options': {'withmpi':", "link_type=LinkType.CALL_CALC, link_label='CALL') node1.store() node2.store() node3.store() result = find_last_in_restart(node_main) assert result", "assert is_code(code) def test_get_inputs_fleur(): ''' Tests if get_inputs_fleur assembles inputs", "'uuid': [node.uuid], 'serial': [False], 'resources': [{'num_machines': 1, 'num_mpiprocs_per_machine': 1}]} inputs_optimize", "'creator_name': 'fleur 30', 'energy_units': 'eV', 'kmax': 4.2, 'fermi_energy': 0.0605833326, 'spin_density':", "False, 0.5, None, 720)] results_optimize = [ (4, 3, 8,", "if get_inputs_fleur assembles inputs correctly. Note it is the work", "Note it is the work of FleurCalculation to check if", "{'structure': structure, 'inpgencode': code, 'options': {}, 'params': params} returns =", "720)] results_optimize = [ (4, 3, 8, 'Computational setup is", "6, 4, 'Computational setup is perfect! 
Nodes: 4, MPIs per", "from aiida.orm import Dict code = fixture_code('fleur.inpgen') structure = generate_structure()", "# is_code def test_is_code_interface(fixture_code): from aiida_fleur.tools.common_fleur_wf import is_code assert is_code('random_string')", "'creator_target_architecture': 'GEN', 'orbital_magnetic_moment_units': 'muBohr', 'orbital_magnetic_spin_up_charges': [], 'orbital_magnetic_spin_down_charges': []}) out.store() node", "k-points is 720'), (4, 6, 4, 'Computational setup is perfect!", "'description': 'description', 'label': 'label'}, 'code': code, 'parameters': params, 'structure': structure", "zip(inputs_optimize, results_optimize)) def test_optimize_calc_options(input, result_correct): from aiida_fleur.tools.common_fleur_wf import optimize_calc_options result", "code.store() assert is_code(code.uuid) assert is_code(code.pk) assert is_code('@'.join([code.label, code.get_computer_name()])) assert is_code(code)", "not_existing = 'fleur.not_existing' assert isinstance(test_and_get_codenode(code, expected), Code) with pytest.raises(ValueError) as", "a fleur.fleur executable are:\\n\" \"* fleur_test@localhost-test\") with pytest.raises(ValueError) as msg:", "import Dict inputs = {'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp',", "def test_test_and_get_codenode_inpgen(fixture_code): from aiida_fleur.tools.common_fleur_wf import test_and_get_codenode from aiida.orm import Code", "{'custom_scheduler_commands': 'test_command', 'withmpi': True} assert out_settings == {'test': 1} inputs", "test_optimize_calc_options(input, result_correct): from aiida_fleur.tools.common_fleur_wf import optimize_calc_options result = optimize_calc_options(*input) assert", "(\"Code not valid, and no valid codes for fleur.not_existing.\\n\" \"Configure", "@pytest.mark.skip(reason=\"Test is not implemented\") def test_determine_favorable_reaction(): from aiida_fleur.tools.common_fleur_wf import determine_favorable_reaction", "'16:12:08'}, 'unparsed': [], 'walltime': 43, 'warnings': {'info': {}, 'debug': {},", "to 1. Changed the number of nodes from 4 to", "setup is perfect! 
Nodes: 4, MPIs per node 12, OMP", "aiida_fleur.tools.common_fleur_wf import test_and_get_codenode from aiida.orm import Code from aiida.common.exceptions import", "3.3719329177, 3.3719329162], 'number_of_kpoints': 8, 'number_of_species': 1, 'fermi_energy_units': 'Htr', 'sum_of_eigenvalues': -2973.4129786677,", "True, 2, None, 720), (4, 8, 3, True, 100, None,", "Fleur Parser v0.2beta', 'CalcJob_uuid': '3dc62d43-b607-4415-920f-e0d34e805711', 'creator_name': 'fleur 30', 'energy_units': 'eV',", "[6.0662e-06], 'energy': [-138529.7052157], 'force_largest': [0.0], 'ncores': [12], 'pk': [node.pk], 'uuid':", "= 'fleur_test' code_fleur.store() expected = 'fleur.inpgen' nonexpected = 'fleur.fleur' not_existing", "True, 100, None, 720, 0.5), (4, 8, 3, False, 0.5,", "of MPIs per node from 8 to 20 an OMP", "\"Configure at least one first using\\n\" \" verdi code setup\")", "= generate_calc_job_node('fleur.fleur', fixture_localhost) node_main = generate_work_chain_node('fleur.base_relax', fixture_localhost) node1.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL')", "{'num_machines': 1}}, 'description': '', 'label': ''}, 'code': code, 'parameters': params,", "'costkonstant': [147.02734883720933], 'walltime_sec_cor': [43], 'total_cost': [834527.2320000001], 'fermi_energy': [0.0605833326], 'bandgap': [6.0662e-06],", "str(msg.value) == (\"Code not valid, and no valid codes for", "def test_get_inputs_inpgen(fixture_code, generate_structure): ''' Tests if get_inputs_fleur assembles inputs correctly.", "'label': ''}, 'code': code, 'parameters': params, 'structure': structure} assert get_inputs_inpgen(**inputs)", "1}) inputs = {'structure': structure, 'inpgencode': code, 'options': {}, 'label':", "results['code'] == 'code' assert results['fleurinpdata'] == 'fleurinp' assert results['parent_folder'] ==", "'warnings': {'info': {}, 'debug': {}, 'error': {}, 'warning': {}}, 'start_date':", "aiida.common.links import LinkType from aiida.orm import Dict out = Dict(dict={'title':", "'error': {}, 'warning': {}}, 'start_date': {'date': '2019/11/12', 'time': '16:11:25'}, 'parser_info':", "4.2, 'fermi_energy': 0.0605833326, 'spin_density': 0.0792504665, 'bandgap_units': 'eV', 'force_largest': 0.0, 'energy_hartree':", "per node 3, OMP per MPI 8. 
Number of k-points", "from aiida_fleur.tools.common_fleur_wf import is_code assert is_code('random_string') is None assert is_code('fleur.inpGUT')", "aiida_fleur.tools.common_fleur_wf import get_inputs_inpgen from aiida.orm import Dict code = fixture_code('fleur.inpgen')", "{'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp', 'options': {'custom_scheduler_commands': 'test_command'}, 'serial':", "== {'test': 1} inputs = {'code': 'code', 'remote': 'remote', 'fleurinp':", "NotExistent # install code setup code code = fixture_code('fleur.inpgen') code_fleur", "(4, 8, 3, True, 100, None, 720, 0.5), (4, 8,", "{'n_symmetries': [8], 'n_spin_components': [2], 'n_kpoints': [8], 'n_iterations': [11], 'walltime_sec': [43],", "of k-points is 720'), (4, 6, 4, 'Computational setup is", "[], 'magnetic_moments': [3.3720063737, 3.3719345944, 3.3719329177, 3.3719329162], 'number_of_kpoints': 8, 'number_of_species': 1,", "get_inputs_fleur(**inputs) out_options = results['options'].get_dict() assert results['description'] == '' assert results['label']", "import absolute_import import pytest import os # is_code def test_is_code_interface(fixture_code):", "from aiida_fleur.tools.common_fleur_wf import get_kpoints_mesh_from_kdensity from aiida.orm import KpointsData a, b", "link_type=LinkType.CALL_CALC, link_label='CALL') node3.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node1.store() node2.store() node3.store() result =", "result = optimize_calc_options(*input) assert result == result_correct def test_find_last_in_restart(fixture_localhost, generate_calc_job_node,", "'CalcJob_uuid': '3dc62d43-b607-4415-920f-e0d34e805711', 'creator_name': 'fleur 30', 'energy_units': 'eV', 'kmax': 4.2, 'fermi_energy':", "'n_iterations': [11], 'walltime_sec': [43], 'walltime_sec_per_it': [3.909090909090909], 'n_iterations_total': [11], 'density_distance': [0.0682602474],", "generate_work_chain_node): from aiida_fleur.tools.common_fleur_wf import find_last_in_restart from aiida.common.links import LinkType node1", "assert result == result_correct def test_find_last_in_restart(fixture_localhost, generate_calc_job_node, generate_work_chain_node): from aiida_fleur.tools.common_fleur_wf", "4, 'parser_warnings': [], 'magnetic_moments': [3.3720063737, 3.3719345944, 3.3719329177, 3.3719329162], 'number_of_kpoints': 8,", "'number_of_spin_components': 2, 'charge_den_xc_den_integral': -223.295208608, 'magnetic_spin_down_charges': [5.777470284, 5.7775460208, 5.7775477657, 5.7775477672], 'number_of_iterations_total':", "'energy_valence_electrons': -71.6009296831, 'magnetic_spin_up_charges': [9.1494766577, 9.1494806151, 9.1494806833, 9.1494806834], 'orbital_magnetic_moments': [], 'density_convergence_units':", "3, False, 0.5, None, 720)] results_optimize = [ (4, 3,", "nonexpected, use_exceptions=True) assert str(msg.value) == (\"Given Code node is not", "'walltime_sec_cor': [43], 'total_cost': [834527.2320000001], 'fermi_energy': [0.0605833326], 'bandgap': [6.0662e-06], 'energy': [-138529.7052157],", "(4, 8, 3, True, 2, None, 720), (4, 8, 3,", "'resources': {'num_machines': 1}}, 'description': '', 'label': ''}, 'code': code, 'parameters':", "is the work of FleurCalculation to check if input types", "code code = fixture_code('fleur.inpgen') code_fleur = fixture_code('fleur.fleur') code_fleur.label = 'fleur_test'", "'spin_density': 0.0792504665, 'bandgap_units': 'eV', 'force_largest': 0.0, 'energy_hartree': -5090.8728101494, 'walltime_units': 'seconds',", "result == result_correct def 
# ======================================================================
# Tests for aiida_fleur.tools.common_fleur_wf
# ======================================================================
import pytest
import os


# is_code
def test_is_code_interface(fixture_code):
    from aiida_fleur.tools.common_fleur_wf import is_code

    assert is_code('random_string') is None
    assert is_code('fleur.inpGUT') is None
    assert is_code(99999) is None

    code = fixture_code('fleur.inpgen')
    code.store()

    assert is_code(code.uuid)
    assert is_code(code.pk)
    assert is_code('@'.join([code.label, code.get_computer_name()]))
    assert is_code(code)


def test_get_inputs_fleur():
    '''
    Tests if get_inputs_fleur assembles inputs correctly.
    Note it is the work of FleurCalculation to check if the inputs are valid.
    '''
    from aiida_fleur.tools.common_fleur_wf import get_inputs_fleur
    from aiida.orm import Dict

    inputs = {'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp',
              'options': {'custom_scheduler_commands': 'test_command'},
              'label': 'label', 'description': 'description',
              'settings': {'test': 1}, 'serial': False}
    results = get_inputs_fleur(**inputs)

    out_options = results['options'].get_dict()
    out_settings = results['settings'].get_dict()

    assert results['code'] == 'code'
    assert results['fleurinpdata'] == 'fleurinp'
    assert results['parent_folder'] == 'remote'
    assert results['description'] == 'description'
    assert results['label'] == 'label'
    assert out_options == {'custom_scheduler_commands': 'test_command', 'withmpi': True}
    assert out_settings == {'test': 1}

    # repeat with serial=True; a serial run must switch off MPI
    inputs = {'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp',
              'options': {}, 'serial': True}
    results = get_inputs_fleur(**inputs)
    out_options = results['options'].get_dict()
    assert out_options == {'withmpi': False,
                           'resources': {'num_machines': 1, 'num_mpiprocs_per_machine': 1}}


def test_get_inputs_inpgen(fixture_code, generate_structure):
    '''
    Tests if get_inputs_inpgen assembles inputs correctly.
    Note it is the work of the calculation plugin to check if the inputs are valid.
    '''
    from aiida_fleur.tools.common_fleur_wf import get_inputs_inpgen
    from aiida.orm import Dict

    code = fixture_code('fleur.inpgen')
    structure = generate_structure()
    params = Dict(dict={'test': 1})

    inputs = {'structure': structure, 'inpgencode': code, 'options': {},
              'label': 'label', 'description': 'description', 'params': params}
    returns = {'metadata': {'options': {'withmpi': False, 'resources': {'num_machines': 1}},
                            'description': 'description', 'label': 'label'},
               'code': code, 'parameters': params, 'structure': structure}
    assert get_inputs_inpgen(**inputs) == returns

    # repeat without a label and description
    inputs = {'structure': structure, 'inpgencode': code, 'options': {}, 'params': params}
    returns = {'metadata': {'options': {'withmpi': False, 'resources': {'num_machines': 1}},
                            'description': '', 'label': ''},
               'code': code, 'parameters': params, 'structure': structure}
    assert get_inputs_inpgen(**inputs) == returns


@pytest.mark.skip(reason='Test is not implemented')
def test_get_scheduler_extras():
    from aiida_fleur.tools.common_fleur_wf import get_scheduler_extras


# test_and_get_codenode
def test_test_and_get_codenode(fixture_code):
    from aiida_fleur.tools.common_fleur_wf import test_and_get_codenode
    from aiida.orm import Code
    from aiida.common.exceptions import NotExistent

    # install code setup code
    code = fixture_code('fleur.inpgen')
    code_fleur = fixture_code('fleur.fleur')
    code_fleur.label = 'fleur_test'
    code_fleur.store()

    expected = 'fleur.inpgen'
    nonexpected = 'fleur.fleur'
    not_existing = 'fleur.not_existing'

    assert isinstance(test_and_get_codenode(code, expected), Code)

    with pytest.raises(ValueError) as msg:
        test_and_get_codenode(code, nonexpected, use_exceptions=True)
    assert str(msg.value) == ('Given Code node is not of expected code type.\n'
                              'Valid labels for a fleur.fleur executable are:\n'
                              '* fleur_test@localhost-test\n')

    with pytest.raises(ValueError) as msg:
        test_and_get_codenode(code, not_existing, use_exceptions=True)
    assert str(msg.value) == ('Code not valid, and no valid codes for '
                              'fleur.not_existing found on database.')


def test_determine_favorable_reaction():
    from aiida_fleur.tools.common_fleur_wf import determine_favorable_reaction

    # ...


# @pytest.mark.skip(reason='There seems to be no way ... verdi code setup')
def test_get_kpoints_mesh_from_kdensity(generate_structure):
    from aiida_fleur.tools.common_fleur_wf import get_kpoints_mesh_from_kdensity
    from aiida.orm import KpointsData

    a, b = get_kpoints_mesh_from_kdensity(generate_structure(), 0.1)
    assert a == ([21, 21, 21], [0, 0, 0])
    assert isinstance(b, KpointsData)


@pytest.mark.skip(reason='There seems to be no way to add outputs to CalcJobNode')
def test_performance_extract_calcs(fixture_localhost, generate_calc_job_node):
    from aiida_fleur.tools.common_fleur_wf import performance_extract_calcs
    from aiida.common.links import LinkType
    from aiida.orm import Dict

    out = Dict(dict={'title': 'A Fleur input generator calculation with aiida',
                     'energy': -138529.7052157,
                     'bandgap': 6.0662e-06,
                     'end_date': {'date': '2019/11/12', 'time': '16:12:08'},
                     'unparsed': [],
                     'walltime': 43,
                     'warnings': {'info': {}, 'debug': {}, 'error': {}, 'warning': {}},
                     'start_date': {'date': '2019/11/12', 'time': '16:11:25'},
                     'energy_hartree': -5090.8728101494,
                     'walltime_units': 'seconds',
                     'charge_density1': 0.0577674505,
                     'charge_density2': 0.0461840944,
                     'number_of_atoms': 4,
                     # ...
                     'fermi_energy_units': 'Htr',
                     'sum_of_eigenvalues': -2973.4129786677,
                     'output_file_version': '0.27',
                     'energy_hartree_units': 'Htr',
                     # ...
                     'number_of_iterations_total': 11,
                     'creator_target_architecture': 'GEN',
                     'orbital_magnetic_moment_units': 'muBohr',
                     'orbital_magnetic_spin_up_charges': [],
                     'orbital_magnetic_spin_down_charges': []})
    out.store()
    node = generate_calc_job_node('fleur.fleur', fixture_localhost)
    node.store()
    out.add_incoming(node, link_type=LinkType.CREATE, link_label='output_parameters')

    result = performance_extract_calcs([node.pk])
    assert result == {
        # ...
        'kmax': [4.2],
        'cost': [75866.11200000001],
        'costkonstant': [147.02734883720933],
        'walltime_sec_cor': [43],
        # ...
        'bandgap': [6.0662e-06],
        'energy': [-138529.7052157],
        'force_largest': [0.0],
        'ncores': [12],
        'pk': [node.pk],
        'uuid': [node.uuid],
        'serial': [False],
        'resources': [{'num_machines': 1, 'num_mpiprocs_per_machine': 1}]}


inputs_optimize = [(4, 8, 3, True, 0.5, None, 720),
                   (4, 8, 3, True, 2, None, 720),
                   (4, 8, 3, True, 100, None, 720),
                   (4, 8, 3, True, 100, None, 720, 0.5),
                   (4, 8, 3, False, 0.5, None, 720)]

results_optimize = [
    (4, 12, 2, 'Computational setup is perfect! Nodes: 4, MPIs per node 12, '
               'OMP per MPI 2. Number of k-points is 720'),
    # ...
    (4, 20, 1, 'WARNING: Changed the number of MPIs per node from 8 to 20 and OMP '
               'from 3 to 1. Changed the number of nodes from 4 to 3. '
               'Number of k-points is 720.'),
    # ...
    (4, 6, 4, 'Computational setup is perfect! Nodes: 4, MPIs per node 6, '
              'OMP per MPI 4. Number of k-points is 720.')]


@pytest.mark.parametrize('input,result_correct', zip(inputs_optimize, results_optimize))
def test_optimize_calc_options(input, result_correct):
    from aiida_fleur.tools.common_fleur_wf import optimize_calc_options

    result = optimize_calc_options(*input)
    assert result == result_correct


def test_find_last_in_restart(fixture_localhost, generate_calc_job_node, generate_work_chain_node):
    from aiida_fleur.tools.common_fleur_wf import find_last_in_restart
    from aiida.common.links import LinkType

    node_main = generate_work_chain_node('fleur.base_relax', fixture_localhost)
    node1 = generate_calc_job_node('fleur.fleur', fixture_localhost)
    node2 = generate_calc_job_node('fleur.fleur', fixture_localhost)
    node3 = generate_calc_job_node('fleur.fleur', fixture_localhost)

    node1.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL')
    node2.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL')
    node3.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL')
    node1.store()
    node2.store()
    node3.store()

    result = find_last_in_restart(node_main)
    assert result == node3.uuid
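# How optimize_calc_options might be driven directly, outside the parametrized
# test above (a sketch with hypothetical values; the positional argument order
# (nodes, mpi_per_node, omp_per_mpi, use_mpi, mpi_omp_ratio, fleurinp, kpts)
# is inferred from the test cases, not taken from aiida-fleur documentation):
#
#     from aiida_fleur.tools.common_fleur_wf import optimize_calc_options
#
#     nodes, mpi_per_node, omp_per_mpi, message = optimize_calc_options(
#         4, 8, 3, True, 0.5, None, 720)
#     print(message)  # e.g. 'Computational setup is perfect! ...'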
# ======================================================================
# probnum: random variable interface
# ======================================================================
"""
Random variables.

This module implements random variables. Random variables are the main in- and
outputs of probabilistic numerical methods.
"""

from typing import Any, Callable, Dict, Generic, Optional, Tuple, TypeVar, Union

import numpy as np

from probnum import utils as _utils
from probnum.type import (
    ArrayLikeGetitemArgType,
    DTypeArgType,
    FloatArgType,
    RandomStateArgType,
    RandomStateType,
    ShapeArgType,
    ShapeType,
)

try:
    # functools.cached_property is only available in Python >=3.8
    from functools import cached_property
except ImportError:
    from cached_property import cached_property

_ValueType = TypeVar("ValueType")


class RandomVariable(Generic[_ValueType]):
    """
    Random variables are the main objects used by probabilistic numerical methods.

    Every probabilistic numerical method takes a random variable encoding the prior
    distribution as input and outputs a random variable whose distribution encodes
    the uncertainty arising from finite computation. The generic signature of a
    probabilistic numerical method is:

    ``output_rv = probnum_method(input_rv, method_params)``

    In practice, most random variables used by methods in ProbNum have Dirac or
    Gaussian measure.

    Instances of :class:`RandomVariable` can be added, multiplied, etc. with arrays
    and linear operators. This may change their ``distribution`` and not necessarily
    all previously available methods are retained.

    The internals of :class:`RandomVariable` objects are assumed to be constant over
    their whole lifecycle. This is due to the caches used to make certain
    computations more efficient. As a consequence, altering the internal state of a
    :class:`RandomVariable` (e.g. its mean, cov, sampling function, etc.) will
    result in undefined behavior. In particular, this should be kept in mind when
    subclassing :class:`RandomVariable` or any of its descendants.

    Parameters
    ----------
    shape :
        Shape of realizations of this random variable.
    dtype :
        Data type of realizations of this random variable. If ``object`` will be
        converted to ``numpy.dtype``.
    random_state :
        RandomState object to use for drawing realizations from this random
        variable. If None (or np.random), the global np.random state is used. If
        integer, it is used to seed the local :class:`~numpy.random.RandomState`
        instance.
    as_value_type :
        Function which can be used to transform user-supplied arguments, interpreted
        as realizations of this random variable, to an easy-to-process, normalized
        format. Will be called internally to transform the argument of functions
        like ``in_support``, ``cdf`` and ``logcdf``, ``pdf`` and ``logpdf`` (in
        :class:`ContinuousRandomVariable`), and potentially by similar functions in
        subclasses.

        For instance, this method is useful if (``log``)``cdf`` and (``log``)``pdf``
        both only work on :class:`np.float_` arguments, but we still want the user
        to be able to pass Python :class:`float`. Then ``as_value_type`` should be
        set to something like ``lambda x: np.float64(x)``.

    See Also
    --------
    asrandvar : Transform into a :class:`RandomVariable`.

    Examples
    --------
    """

    # pylint: disable=too-many-instance-attributes,too-many-public-methods

    def __init__(
        self,
        shape: ShapeArgType,
        dtype: DTypeArgType,
        random_state: RandomStateArgType = None,
        parameters: Optional[Dict[str, Any]] = None,
        sample: Optional[Callable[[ShapeType], _ValueType]] = None,
        in_support: Optional[Callable[[_ValueType], bool]] = None,
        cdf: Optional[Callable[[_ValueType], np.float_]] = None,
        logcdf: Optional[Callable[[_ValueType], np.float_]] = None,
        quantile: Optional[Callable[[FloatArgType], _ValueType]] = None,
        mode: Optional[Callable[[], _ValueType]] = None,
        median: Optional[Callable[[], _ValueType]] = None,
        mean: Optional[Callable[[], _ValueType]] = None,
        cov: Optional[Callable[[], _ValueType]] = None,
        var: Optional[Callable[[], _ValueType]] = None,
        std: Optional[Callable[[], _ValueType]] = None,
        entropy: Optional[Callable[[], np.float_]] = None,
        as_value_type: Optional[Callable[[Any], _ValueType]] = None,
    ):
        self.__shape = _utils.as_shape(shape)

        # Data Types
        self.__dtype = np.dtype(dtype)
        self.__median_dtype = RandomVariable.infer_median_dtype(self.__dtype)
        self.__moment_dtype = RandomVariable.infer_moment_dtype(self.__dtype)

        self._random_state = _utils.as_random_state(random_state)

        # Probability distribution of the random variable
        self.__parameters = parameters.copy() if parameters is not None else {}

        self.__sample = sample

        self.__in_support = in_support
        self.__cdf = cdf
        self.__logcdf = logcdf
        self.__quantile = quantile

        # Properties of the random variable
        self.__mode = mode
        self.__median = median
        self.__mean = mean
        self.__cov = cov
        self.__var = var
        self.__std = std
        self.__entropy = entropy

        # Utilities
        self.__as_value_type = as_value_type

    def __repr__(self) -> str:
        return f"<{self.shape} {self.__class__.__name__} with dtype={self.dtype}>"

    @property
    def shape(self) -> ShapeType:
        """Shape of realizations of the random variable."""
        return self.__shape

    @cached_property
    def ndim(self) -> int:
        return len(self.__shape)

    @cached_property
    def size(self) -> int:
        return int(np.prod(self.__shape))

    @property
    def dtype(self) -> np.dtype:
        """Data type of (elements of) a realization of this random variable."""
        return self.__dtype

    @property
    def median_dtype(self) -> np.dtype:
        """The dtype of the :attr:`median`. It will be set to the dtype arising from
        the multiplication of values with dtypes :attr:`dtype` and
        :class:`np.float_`. This is motivated by the fact that, even for discrete
        random variables, e.g. integer-valued random variables, the :attr:`median`
        might lie in between two values in which case these values are averaged. For
        example, a uniform random variable on :math:`\\{ 1, 2, 3, 4 \\}` has a
        median of :math:`2.5`.
        """
        return self.__median_dtype

    @property
    def moment_dtype(self) -> np.dtype:
        """The dtype of any (function of a) moment of the random variable, e.g. its
        :attr:`mean`, :attr:`cov`, :attr:`var`, or :attr:`std`. It will be set to
        the dtype arising from the multiplication of values with dtypes
        :attr:`dtype` and :class:`np.float_`. This is motivated by the mathematical
        definition of a moment as a sum or an integral over products of
        probabilities and values of the random variable, which are represented as
        using the dtypes :class:`np.float_` and :attr:`dtype`, respectively.
        """
        return self.__moment_dtype

    @property
    def random_state(self) -> RandomStateType:
        """Random state of the random variable.

        This attribute defines the RandomState object to use for drawing
        realizations from this random variable. If None (or np.random), the global
        np.random state is used. If integer, it is used to seed the local
        :class:`~numpy.random.RandomState` instance.
        """
        return self._random_state

    @random_state.setter
    def random_state(self, seed: RandomStateArgType):
        """Get or set the RandomState object of the underlying distribution.

        This can be either None or an existing RandomState object. If None (or
        np.random), use the RandomState singleton used by np.random. If already a
        RandomState instance, use it. If an int, use a new RandomState instance
        seeded with seed.
        """
        self._random_state = _utils.as_random_state(seed)

    @property
    def parameters(self) -> Dict[str, Any]:
        """
        Parameters of the probability distribution.

        The parameters of the distribution such as mean, variance, et cetera stored
        in a ``dict``.
        """
        return self.__parameters.copy()
    @cached_property
    def mode(self) -> _ValueType:
        """
        Mode of the random variable.

        Returns
        -------
        mode : float
            The mode of the random variable.
        """
        if self.__mode is None:
            raise NotImplementedError

        mode = self.__mode()

        RandomVariable._check_property_value(
            "mode", mode, shape=self.__shape, dtype=self.__dtype,
        )

        # Make immutable
        if isinstance(mode, np.ndarray):
            mode.setflags(write=False)

        return mode

    @cached_property
    def median(self) -> _ValueType:
        """
        Median of the random variable.

        To learn about the dtype of the median, see :attr:`median_dtype`.

        Returns
        -------
        median : float
            The median of the distribution.
        """
        if self.__shape != ():
            raise NotImplementedError(
                "The median is only defined for scalar random variables."
            )

        median = self.__median()

        RandomVariable._check_property_value(
            "median", median, shape=self.__shape, dtype=self.__median_dtype,
        )

        # Make immutable
        if isinstance(median, np.ndarray):
            median.setflags(write=False)

        return median

    @cached_property
    def mean(self) -> _ValueType:
        """
        Mean :math:`\\mathbb{E}(X)` of the distribution.

        To learn about the dtype of the mean, see :attr:`moment_dtype`.

        Returns
        -------
        mean : array-like
            The mean of the distribution.
        """
        if self.__mean is None:
            raise NotImplementedError

        mean = self.__mean()

        RandomVariable._check_property_value(
            "mean", mean, shape=self.__shape, dtype=self.__moment_dtype,
        )

        # Make immutable
        if isinstance(mean, np.ndarray):
            mean.setflags(write=False)

        return mean

    @cached_property
    def cov(self) -> _ValueType:
        """
        Covariance :math:`\\operatorname{Cov}(X) = \\mathbb{E}((X-\\mathbb{E}(X))(X-\\mathbb{E}(X))^\\top)` of the random variable.

        To learn about the dtype of the covariance, see :attr:`moment_dtype`.

        Returns
        -------
        cov : array-like
            The covariance of the random variable.
        """  # pylint: disable=line-too-long
        if self.__cov is None:
            raise NotImplementedError

        cov = self.__cov()

        RandomVariable._check_property_value(
            "covariance",
            cov,
            shape=(self.size, self.size) if self.ndim > 0 else (),
            dtype=self.__moment_dtype,
        )

        # Make immutable
        if isinstance(cov, np.ndarray):
            cov.setflags(write=False)

        return cov

    @cached_property
    def var(self) -> _ValueType:
        """
        Variance :math:`\\operatorname{Var}(X) = \\mathbb{E}((X-\\mathbb{E}(X))^2)`
        of the distribution.

        To learn about the dtype of the variance, see :attr:`moment_dtype`.

        Returns
        -------
        var : array-like
            The variance of the distribution.
        """
        if self.__var is None:
            try:
                var = np.diag(self.cov).reshape(self.__shape).copy()
            except NotImplementedError as exc:
                raise NotImplementedError from exc
        else:
            var = self.__var()

        RandomVariable._check_property_value(
            "variance", var, shape=self.__shape, dtype=self.__moment_dtype,
        )

        # Make immutable
        if isinstance(var, np.ndarray):
            var.setflags(write=False)

        return var

    @cached_property
    def std(self) -> _ValueType:
        """
        Standard deviation of the distribution.

        To learn about the dtype of the standard deviation, see
        :attr:`moment_dtype`.

        Returns
        -------
        std : array-like
            The standard deviation of the distribution.
        """
        if self.__std is None:
            try:
                std = np.sqrt(self.var)
            except NotImplementedError as exc:
                raise NotImplementedError from exc
        else:
            std = self.__std()

        RandomVariable._check_property_value(
            "standard deviation", std, shape=self.__shape, dtype=self.__moment_dtype,
        )

        # Make immutable
        if isinstance(std, np.ndarray):
            std.setflags(write=False)

        return std

    @cached_property
    def entropy(self) -> np.float_:
        if self.__entropy is None:
            raise NotImplementedError

        entropy = self.__entropy()
        entropy = RandomVariable._ensure_numpy_float(
            "entropy", entropy, force_scalar=True
        )

        return entropy

    def in_support(self, x: _ValueType) -> bool:
        if self.__in_support is None:
            raise NotImplementedError

        in_support = self.__in_support(self._as_value_type(x))

        if not isinstance(in_support, bool):
            raise ValueError(
                f"The function `in_support` must return a `bool`, but its return "
                f"value is of type `{type(x)}`."
            )

        return in_support

    def sample(self, size: ShapeArgType = ()) -> _ValueType:
        """
        Draw realizations from a random variable.

        Parameters
        ----------
        size : tuple
            Size of the drawn sample of realizations.

        Returns
        -------
        sample : array-like
            Sample of realizations with the given ``size`` and the inherent
            ``shape``.
        """
        if self.__sample is None:
            raise NotImplementedError("No sampling method provided.")

        return self.__sample(size=_utils.as_shape(size))

    def cdf(self, x: _ValueType) -> np.float_:
        """
        Cumulative distribution function.

        Parameters
        ----------
        x : array-like
            Evaluation points of the cumulative distribution function.
            The shape of this argument should be :code:`(..., S1, ..., SN)`, where
            :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
            The cdf evaluation will be broadcast over all additional dimensions.

        Returns
        -------
        q : array-like
            Value of the cumulative density function at the given points.
        """
        if self.__cdf is not None:
            return RandomVariable._ensure_numpy_float(
                "cdf", self.__cdf(self._as_value_type(x))
            )
        elif self.__logcdf is not None:
            cdf = np.exp(self.logcdf(self._as_value_type(x)))

            assert isinstance(cdf, np.float_)

            return cdf
        else:
            raise NotImplementedError(
                f"Neither the `cdf` nor the `logcdf` of the random variable object "
                f"with type `{type(self).__name__}` is implemented."
            )

    def logcdf(self, x: _ValueType) -> np.float_:
        """
        Log-cumulative distribution function.

        Parameters
        ----------
        x : array-like
            Evaluation points of the cumulative distribution function.
            The shape of this argument should be :code:`(..., S1, ..., SN)`, where
            :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
            The logcdf evaluation will be broadcast over all additional dimensions.

        Returns
        -------
        q : array-like
            Value of the log-cumulative density function at the given points.
        """
        if self.__logcdf is not None:
            return RandomVariable._ensure_numpy_float(
                "logcdf", self.__logcdf(self._as_value_type(x))
            )
        elif self.__cdf is not None:
            logcdf = np.log(self.__cdf(x))

            assert isinstance(logcdf, np.float_)

            return logcdf
        else:
            raise NotImplementedError(
                f"Neither the `logcdf` nor the `cdf` of the random variable object "
                f"with type `{type(self).__name__}` is implemented."
            )

    def quantile(self, p: FloatArgType) -> _ValueType:
        """Quantile function.

        The quantile function :math:`Q \\colon [0, 1] \\to \\mathbb{R}` of a random
        variable :math:`X` is defined as
        :math:`Q(p) = \\inf\\{ x \\in \\mathbb{R} \\colon p \\le F_X(x) \\}`, where
        :math:`F_X \\colon \\mathbb{R} \\to [0, 1]` is the :meth:`cdf` of the random
        variable. From the definition it follows that the quantile function always
        returns values of the same dtype as the random variable. For instance, for
        a discrete distribution over the integers, the returned quantiles will also
        be integers. This means that, in general, :math:`Q(0.5)` is not equal to
        the :attr:`median` as it is defined in this class. See
        https://en.wikipedia.org/wiki/Quantile_function for more details and
        examples.
        """
        if self.__shape != ():
            raise NotImplementedError(
                "The quantile function is only defined for scalar random variables."
            )

        if self.__quantile is None:
            raise NotImplementedError

        try:
            p = _utils.as_numpy_scalar(p, dtype=np.floating)
        except TypeError as exc:
            raise TypeError(
                "The given argument `p` can not be cast to a `np.floating` object."
            ) from exc

        quantile = self.__quantile(p)

        if quantile.shape != self.__shape:
            raise ValueError(
                f"The quantile function should return values of the same shape as "
                f"the random variable, i.e. {self.__shape}, but it returned a value "
                f"with {quantile.shape}."
            )

        if quantile.dtype != self.__dtype:
            raise ValueError(
                f"The quantile function should return values of the same dtype as "
                f"the random variable, i.e. `{self.__dtype.name}`, but it returned "
                f"a value with dtype `{quantile.dtype.name}`."
            )

        return quantile
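    # A sketch of constructing a bare scalar random variable through this
    # interface, assuming only NumPy and SciPy (the callables below are
    # hypothetical user-supplied implementations, not part of this class):
    #
    #   >>> import numpy as np
    #   >>> from scipy.stats import norm
    #   >>> rv = RandomVariable(
    #   ...     shape=(),
    #   ...     dtype=np.dtype(np.float64),
    #   ...     sample=lambda size: np.random.standard_normal(size),
    #   ...     cdf=lambda x: np.float64(norm.cdf(x)),
    #   ...     mean=lambda: np.float64(0.0),
    #   ...     std=lambda: np.float64(1.0),
    #   ... )
    #   >>> rv.cdf(np.float64(0.0))
    #   0.5
    #
    # Properties such as `std` are computed lazily and cached on first access,
    # which is why this class assumes its internals never change after
    # construction.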
\"\"\" return self.__parameters.copy() @cached_property def mode(self) ->", "ShapeArgType, dtype: DTypeArgType, random_state: RandomStateArgType = None, parameters: Optional[Dict[str, Any]]", "import utils as _utils from probnum.type import ( ArrayLikeGetitemArgType, DTypeArgType,", "1]` is the :meth:`cdf` of the random variable. From the", "is not possible for \" f\"{value} of type {type(value)}.\" )", "-> np.float_: \"\"\" Natural logarithm of the probability density function.", "by similar functions in subclasses. For instance, this method is", "the original shape. Returns ------- reshaped_rv : ``self`` with the", "the probability density / mass function at the given points.", "of ints, or n ints See documentation of numpy.ndarray.transpose. Returns", "_ValueType) -> np.float_: if self.__pmf is not None: return DiscreteRandomVariable._ensure_numpy_float(\"pmf\",", "into a :class:`RandomVariable`. Examples -------- \"\"\" # pylint: disable=too-many-instance-attributes,too-many-public-methods def", "sample of realizations. Returns ------- sample : array-like Sample of", "the caches used to make certain computations more efficient. As", "efficient. As a consequence, altering the internal state of a", "-> _ValueType: \"\"\" Mean :math:`\\\\mathbb{E}(X)` of the distribution. To learn", "pylint: disable=line-too-long if self.__cov is None: raise NotImplementedError cov =", "able to pass Python :class:`float`. Then ``as_value_type`` should be set", "if self.__quantile is None: raise NotImplementedError try: p = _utils.as_numpy_scalar(p,", "\"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import floordiv return floordiv(self,", "original shape. Returns ------- reshaped_rv : ``self`` with the new", "`{name}` specified via the constructor of \" f\"`{cls.__name__}` must return", ") def __pos__(self) -> \"RandomVariable\": return RandomVariable( shape=self.shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state),", "in_support(self, x: _ValueType) -> bool: if self.__in_support is None: raise", "dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: +self.sample(size=size), in_support=lambda x: self.in_support(+x), mode=lambda: +self.mode,", "something like ``lambda x: np.float64(x)``. See Also -------- asrandvar :", "random variable. To learn about the dtype of the covariance,", "`{type(self).__name__}` is implemented.\" ) def logpmf(self, x: _ValueType) -> np.float_:", "# pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import divmod_ return divmod_(other, self)", "e.g. its :attr:`mean`, :attr:`cov`, :attr:`var`, or :attr:`std`. It will be", "raise NotImplementedError from exc else: var = self.__var() RandomVariable._check_property_value( \"variance\",", "!= shape: raise ValueError( f\"The {name} of the random variable", "f\"The function `{name}` specified via the constructor of \" f\"`{cls.__name__}`", "Evaluation points of the probability density / mass function. The", "...]] = None, dtype: Optional[np.dtype] = None, ): if shape", "RandomState instance, use it. 
If an int, use a new", "not scalar.\" ) assert isinstance(value, (np.float_, np.ndarray)) return value class", "RandomVariable._check_property_value( \"covariance\", cov, shape=(self.size, self.size) if self.ndim > 0 else", "None: return RandomVariable._ensure_numpy_float( \"logcdf\", self.__logcdf(self._as_value_type(x)) ) elif self.__cdf is not", "= None, quantile: Optional[Callable[[FloatArgType], _ValueType]] = None, mode: Optional[Callable[[], _ValueType]]", "arising from the multiplication of values with dtypes :attr:`dtype` and", "fact that, even for discrete random variables, e.g. integer-valued random", "_ValueType: \"\"\" Variance :math:`\\\\operatorname{Var}(X) = \\\\mathbb{E}((X-\\\\mathbb{E}(X))^2)` of the distribution. To", "ShapeArgType, dtype: DTypeArgType, random_state: Optional[RandomStateType] = None, parameters: Optional[Dict[str, Any]]", "logpmf(self, x: _ValueType) -> np.float_: if self.__logpmf is not None:", "NotImplementedError from exc else: std = self.__std() RandomVariable._check_property_value( \"standard deviation\",", "= None, ): # Probability density function self.__pdf = pdf", "@cached_property def mean(self) -> _ValueType: \"\"\" Mean :math:`\\\\mathbb{E}(X)` of the", "with type `{type(self).__name__}` is implemented.\" ) def logpdf(self, x: _ValueType)", "have the correct \" f\"dtype. Expected {dtype.name} but got {value.dtype.name}.\"", "see :attr:`moment_dtype`. Returns ------- var : array-like The variance of", "\"median\", median, shape=self.__shape, dtype=self.__median_dtype, ) # Make immutable if isinstance(median,", "it is defined in this class. See https://en.wikipedia.org/wiki/Quantile_function for more", "pdf = np.exp(self.__logpdf(self._as_value_type(x))) assert isinstance(pdf, np.float_) return pdf raise NotImplementedError(", "dtypes :attr:`dtype` and :class:`np.float_`. This is motivated by the fact", "or :attr:`std`. It will be set to the dtype arising", "of :class:`RandomVariable` objects are assumed to be constant over their", "the distribution. To learn about the dtype of the mean,", "of the log-cumulative density function at the given points. \"\"\"", "disable=import-outside-toplevel,cyclic-import from ._arithmetic import sub return sub(self, other) def __rsub__(self,", "Properties of the random variable self.__mode = mode self.__median =", "try: p = _utils.as_numpy_scalar(p, dtype=np.floating) except TypeError as exc: raise", "`cdf` nor the `logcdf` of the random variable object \"", "the dtype of the median, see :attr:`median_dtype`. Returns ------- median", "@staticmethod def infer_moment_dtype(value_dtype: DTypeArgType) -> np.dtype: return np.promote_types(value_dtype, np.float_) def", "NotImplementedError cov = self.__cov() RandomVariable._check_property_value( \"covariance\", cov, shape=(self.size, self.size) if", "Optional[np.dtype] = None, ): if shape is not None: if", "return DiscreteRandomVariable._ensure_numpy_float(\"pmf\", self.__pmf(x)) elif self.__logpmf is not None: pmf =", "a random variable encoding the prior distribution as input and", "-> Dict[str, Any]: \"\"\" Parameters of the probability distribution. The", "std=lambda: self.std[key], entropy=lambda: self.entropy, as_value_type=self.__as_value_type, ) def reshape(self, newshape: ShapeArgType)", "---------- axes : None, tuple of ints, or n ints", "\"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import add return add(other,", "if shape is not None: if value.shape != shape: raise", "variable. 
"""
This module implements random variables. Random variables are the main in- and
outputs of probabilistic numerical methods.
"""

from typing import Any, Callable, Dict, Generic, Optional, Tuple, TypeVar, Union

import numpy as np

from probnum import utils as _utils
from probnum.type import (
    ArrayLikeGetitemArgType,
    DTypeArgType,
    FloatArgType,
    RandomStateArgType,
    RandomStateType,
    ShapeArgType,
    ShapeType,
)

try:
    # functools.cached_property is only available in Python >=3.8
    from functools import cached_property
except ImportError:
    from cached_property import cached_property

_ValueType = TypeVar("ValueType")


class RandomVariable(Generic[_ValueType]):
    """
    Random variables are the main objects used by probabilistic numerical methods.

    Every probabilistic numerical method takes a random variable encoding the prior
    distribution as input and outputs a random variable whose distribution encodes
    the uncertainty arising from finite computation. The generic signature of a
    probabilistic numerical method is:

    ``output_rv = probnum_method(input_rv, method_params)``

    In practice, most random variables used by methods in ProbNum have Dirac or
    Gaussian measure.

    Instances of :class:`RandomVariable` can be added, multiplied, etc. with arrays
    and linear operators. This may change their ``distribution`` and not necessarily
    all previously available methods are retained.

    The internals of :class:`RandomVariable` objects are assumed to be constant over
    their whole lifecycle. This is due to the caches used to make certain
    computations more efficient. As a consequence, altering the internal state of a
    :class:`RandomVariable` (e.g. its mean, cov, sampling function, etc.) will
    result in undefined behavior. In particular, this should be kept in mind when
    subclassing :class:`RandomVariable` or any of its descendants.

    Parameters
    ----------
    shape :
        Shape of realizations of this random variable.
    dtype :
        Data type of realizations of this random variable. If ``object`` will be
        converted to ``numpy.dtype``.
    as_value_type :
        Function which can be used to transform user-supplied arguments,
        interpreted as realizations of this random variable, to an easy-to-process,
        normalized format. Will be called internally to transform the argument of
        functions like ``in_support``, ``cdf`` and ``logcdf``, ``pmf`` and
        ``logpmf`` (in :class:`DiscreteRandomVariable`), ``pdf`` and ``logpdf`` (in
        :class:`ContinuousRandomVariable`), and potentially by similar functions in
        subclasses.

        For instance, this method is useful if (``log``)``cdf`` and (``log``)``pdf``
        both only work on :class:`np.float_` arguments, but we still want the user
        to be able to pass Python :class:`float`. Then ``as_value_type`` should be
        set to something like ``lambda x: np.float64(x)``.

    See Also
    --------
    asrandvar : Transform into a :class:`RandomVariable`.
    """

    # pylint: disable=too-many-instance-attributes,too-many-public-methods

    def __init__(
        self,
        shape: ShapeArgType,
        dtype: DTypeArgType,
        random_state: RandomStateArgType = None,
        parameters: Optional[Dict[str, Any]] = None,
        sample: Optional[Callable[[ShapeType], _ValueType]] = None,
        in_support: Optional[Callable[[_ValueType], bool]] = None,
        cdf: Optional[Callable[[_ValueType], np.float_]] = None,
        logcdf: Optional[Callable[[_ValueType], np.float_]] = None,
        quantile: Optional[Callable[[FloatArgType], _ValueType]] = None,
        mode: Optional[Callable[[], _ValueType]] = None,
        median: Optional[Callable[[], _ValueType]] = None,
        mean: Optional[Callable[[], _ValueType]] = None,
        cov: Optional[Callable[[], _ValueType]] = None,
        var: Optional[Callable[[], _ValueType]] = None,
        std: Optional[Callable[[], _ValueType]] = None,
        entropy: Optional[Callable[[], np.float_]] = None,
        as_value_type: Optional[Callable[[Any], _ValueType]] = None,
    ):
        # pylint: disable=too-many-arguments,too-many-locals
        """Create a new random variable."""
        self.__shape = _utils.as_shape(shape)

        # Data types
        self.__dtype = np.dtype(dtype)
        self.__median_dtype = RandomVariable.infer_median_dtype(self.__dtype)
        self.__moment_dtype = RandomVariable.infer_moment_dtype(self.__dtype)

        self._random_state = _utils.as_random_state(random_state)

        # Probability distribution of the random variable
        self.__parameters = parameters.copy() if parameters is not None else {}

        self.__sample = sample
        self.__in_support = in_support
        self.__cdf = cdf
        self.__logcdf = logcdf
        self.__quantile = quantile

        # Properties of the random variable
        self.__mode = mode
        self.__median = median
        self.__mean = mean
        self.__cov = cov
        self.__var = var
        self.__std = std
        self.__entropy = entropy

        # Utilities
        self.__as_value_type = as_value_type
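    # A minimal usage sketch (illustrative only, not part of the class): the
    # constructor wires a distribution up from plain callables. Assuming a local
    # `rng = np.random.RandomState(42)`, a scalar standard normal could be
    # specified as
    #
    #     rv = RandomVariable(
    #         shape=(),
    #         dtype=np.dtype(np.float_),
    #         sample=lambda size: rng.standard_normal(size=size),
    #         mean=lambda: np.float_(0.0),
    #         var=lambda: np.float_(1.0),
    #     )
    #
    # after which `rv.sample(size=(3,))` draws three realizations and `rv.mean`
    # evaluates (and caches) the supplied callable.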
    def __repr__(self) -> str:
        return f"<{self.shape} {self.__class__.__name__} with dtype={self.dtype}>"

    @property
    def shape(self) -> ShapeType:
        """Shape of realizations of the random variable."""
        return self.__shape

    @cached_property
    def ndim(self) -> int:
        return len(self.__shape)

    @cached_property
    def size(self) -> int:
        return int(np.prod(self.__shape))

    @property
    def dtype(self) -> np.dtype:
        """Data type of (elements of) a realization of this random variable."""
        return self.__dtype

    @property
    def median_dtype(self) -> np.dtype:
        """The dtype of the :attr:`median`.

        It will be set to the dtype arising from the multiplication of values with
        dtypes :attr:`dtype` and :class:`np.float_`. This is motivated by the fact
        that, even for discrete random variables, e.g. integer-valued random
        variables, the :attr:`median` might lie in between two values in which case
        these values are averaged. For example, a uniform random variable on
        :math:`\\{ 1, 2, 3, 4 \\}` will have a median of :math:`2.5`.
        """
        return self.__median_dtype

    @property
    def moment_dtype(self) -> np.dtype:
        """The dtype of any (function of a) moment of the random variable, e.g. its
        :attr:`mean`, :attr:`cov`, :attr:`var`, or :attr:`std`. It will be set to
        the dtype arising from the multiplication of values with dtypes
        :attr:`dtype` and :class:`np.float_`. This is motivated by the mathematical
        definition of a moment as a sum or an integral over products of
        probabilities and values of the random variable, which are represented
        using the dtypes :class:`np.float_` and :attr:`dtype`, respectively.
        """
        return self.__moment_dtype

    @property
    def random_state(self) -> RandomStateType:
        """Random state of the random variable.

        This attribute defines the RandomState object to use for drawing
        realizations from this random variable. If None (or np.random), the global
        np.random state is used. If integer, it is used to seed the local
        :class:`~numpy.random.RandomState` instance.
        """
        return self._random_state

    @random_state.setter
    def random_state(self, seed: RandomStateArgType):
        """Get or set the RandomState object of the underlying distribution.

        This can be either None or an existing RandomState object. If None (or
        np.random), use the RandomState singleton used by np.random. If already a
        RandomState instance, use it. If an int, use a new RandomState instance
        seeded with seed.
        """
        self._random_state = _utils.as_random_state(seed)

    @property
    def parameters(self) -> Dict[str, Any]:
        """Parameters of the probability distribution.

        The parameters of the distribution such as mean, variance, et cetera stored
        in a ``dict``.
        """
        return self.__parameters.copy()
    @cached_property
    def mode(self) -> _ValueType:
        """Mode of the random variable.

        Returns
        -------
        mode : float
            The mode of the random variable.
        """
        if self.__mode is None:
            raise NotImplementedError

        mode = self.__mode()

        RandomVariable._check_property_value(
            "mode", mode, shape=self.__shape, dtype=self.__dtype
        )

        # Make immutable
        if isinstance(mode, np.ndarray):
            mode.setflags(write=False)

        return mode

    @cached_property
    def median(self) -> _ValueType:
        """Median of the random variable.

        To learn about the dtype of the median, see :attr:`median_dtype`.

        Returns
        -------
        median : float
            The median of the distribution.
        """
        if self.__shape != ():
            raise NotImplementedError(
                "The median is only defined for scalar random variables."
            )

        median = self.__median()

        RandomVariable._check_property_value(
            "median", median, shape=self.__shape, dtype=self.__median_dtype
        )

        # Make immutable
        if isinstance(median, np.ndarray):
            median.setflags(write=False)

        return median

    @cached_property
    def mean(self) -> _ValueType:
        """Mean :math:`\\mathbb{E}(X)` of the distribution.

        To learn about the dtype of the mean, see :attr:`moment_dtype`.

        Returns
        -------
        mean : array-like
            The mean of the distribution.
        """
        if self.__mean is None:
            raise NotImplementedError

        mean = self.__mean()

        RandomVariable._check_property_value(
            "mean", mean, shape=self.__shape, dtype=self.__moment_dtype
        )

        # Make immutable
        if isinstance(mean, np.ndarray):
            mean.setflags(write=False)

        return mean

    @cached_property
    def cov(self) -> _ValueType:
        """Covariance :math:`\\operatorname{Cov}(X) = \\mathbb{E}((X - \\mathbb{E}(X))(X - \\mathbb{E}(X))^\\top)` of the random variable.

        To learn about the dtype of the covariance, see :attr:`moment_dtype`.

        Returns
        -------
        cov : array-like
            The covariance of the random variable.
        """  # pylint: disable=line-too-long
        if self.__cov is None:
            raise NotImplementedError

        cov = self.__cov()

        RandomVariable._check_property_value(
            "covariance",
            cov,
            shape=(self.size, self.size) if self.ndim > 0 else (),
            dtype=self.__moment_dtype,
        )

        # Make immutable
        if isinstance(cov, np.ndarray):
            cov.setflags(write=False)

        return cov

    @cached_property
    def var(self) -> _ValueType:
        """Variance :math:`\\operatorname{Var}(X) = \\mathbb{E}((X - \\mathbb{E}(X))^2)` of the distribution.

        To learn about the dtype of the variance, see :attr:`moment_dtype`.

        Returns
        -------
        var : array-like
            The variance of the distribution.
        """
        if self.__var is None:
            try:
                var = np.diag(self.cov).reshape(self.__shape).copy()
            except NotImplementedError as exc:
                raise NotImplementedError from exc
        else:
            var = self.__var()

        RandomVariable._check_property_value(
            "variance", var, shape=self.__shape, dtype=self.__moment_dtype
        )

        # Make immutable
        if isinstance(var, np.ndarray):
            var.setflags(write=False)

        return var

    @cached_property
    def std(self) -> _ValueType:
        """Standard deviation of the distribution.

        To learn about the dtype of the standard deviation, see
        :attr:`moment_dtype`.

        Returns
        -------
        std : array-like
            The standard deviation of the distribution.
        """
        if self.__std is None:
            try:
                std = np.sqrt(self.var)
            except NotImplementedError as exc:
                raise NotImplementedError from exc
        else:
            std = self.__std()

        RandomVariable._check_property_value(
            "standard deviation", std, shape=self.__shape, dtype=self.__moment_dtype
        )

        # Make immutable
        if isinstance(std, np.ndarray):
            std.setflags(write=False)

        return std

    @cached_property
    def entropy(self) -> np.float_:
        if self.__entropy is None:
            raise NotImplementedError

        entropy = self.__entropy()
        entropy = RandomVariable._ensure_numpy_float(
            "entropy", entropy, force_scalar=True
        )

        return entropy
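    # Illustrative note (not part of the class): the `setflags(write=False)`
    # calls above are what make the cached moments read-only, e.g.
    #
    #     mean = rv.mean   # computed once, then cached
    #     mean[0] = 42.0   # raises "ValueError: assignment destination is
    #                      # read-only" for ndarray-valued means
    #
    # which enforces the class-level assumption that a RandomVariable's
    # internals stay constant over its whole lifecycle.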
    def in_support(self, x: _ValueType) -> bool:
        if self.__in_support is None:
            raise NotImplementedError

        in_support = self.__in_support(self._as_value_type(x))

        if not isinstance(in_support, bool):
            raise ValueError(
                f"The function `in_support` must return a `bool`, but its return "
                f"value is of type `{type(in_support)}`."
            )

        return in_support

    def sample(self, size: ShapeArgType = ()) -> _ValueType:
        """Draw realizations from a random variable.

        Parameters
        ----------
        size : tuple
            Size of the drawn sample of realizations.

        Returns
        -------
        sample : array-like
            Sample of realizations with the given ``size`` and the inherent
            ``shape``.
        """
        if self.__sample is None:
            raise NotImplementedError("No sampling method provided.")

        return self.__sample(size=_utils.as_shape(size))

    def cdf(self, x: _ValueType) -> np.float_:
        """Cumulative distribution function.

        Parameters
        ----------
        x : array-like
            Evaluation points of the cumulative distribution function. The shape
            of this argument should be :code:`(..., S1, ..., SN)`, where
            :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable. The
            cdf evaluation will be broadcast over all additional dimensions.

        Returns
        -------
        q : array-like
            Value of the cumulative density function at the given points.
        """
        if self.__cdf is not None:
            return RandomVariable._ensure_numpy_float(
                "cdf", self.__cdf(self._as_value_type(x))
            )
        elif self.__logcdf is not None:
            cdf = np.exp(self.logcdf(self._as_value_type(x)))

            assert isinstance(cdf, np.float_)

            return cdf
        else:
            raise NotImplementedError(
                f"Neither the `cdf` nor the `logcdf` of the random variable object "
                f"with type `{type(self).__name__}` is implemented."
            )

    def logcdf(self, x: _ValueType) -> np.float_:
        """Log-cumulative distribution function.

        Parameters
        ----------
        x : array-like
            Evaluation points of the cumulative distribution function. The shape
            of this argument should be :code:`(..., S1, ..., SN)`, where
            :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable. The
            logcdf evaluation will be broadcast over all additional dimensions.

        Returns
        -------
        q : array-like
            Value of the log-cumulative density function at the given points.
        """
        if self.__logcdf is not None:
            return RandomVariable._ensure_numpy_float(
                "logcdf", self.__logcdf(self._as_value_type(x))
            )
        elif self.__cdf is not None:
            logcdf = np.log(self.__cdf(x))

            assert isinstance(logcdf, np.float_)

            return logcdf
        else:
            raise NotImplementedError(
                f"Neither the `logcdf` nor the `cdf` of the random variable object "
                f"with type `{type(self).__name__}` is implemented."
            )
    def quantile(self, p: FloatArgType) -> _ValueType:
        """Quantile function.

        The quantile function :math:`Q \\colon [0, 1] \\to \\mathbb{R}` of a random
        variable :math:`X` is defined as
        :math:`Q(p) = \\inf\\{ x \\in \\mathbb{R} \\colon p \\le F_X(x) \\}`, where
        :math:`F_X \\colon \\mathbb{R} \\to [0, 1]` is the :meth:`cdf` of the
        random variable. From the definition it follows that the quantile function
        always returns values of the same dtype as the random variable. For
        instance, for a discrete distribution over the integers, the returned
        quantiles will also be integers. This means that, in general,
        :math:`Q(0.5)` is not equal to the :attr:`median` as it is defined in this
        class. See https://en.wikipedia.org/wiki/Quantile_function for more details
        and examples.
        """
        if self.__shape != ():
            raise NotImplementedError(
                "The quantile function is only defined for scalar random variables."
            )

        if self.__quantile is None:
            raise NotImplementedError

        try:
            p = _utils.as_numpy_scalar(p, dtype=np.floating)
        except TypeError as exc:
            raise TypeError(
                "The given argument `p` cannot be cast to a `np.floating` object."
            ) from exc

        quantile = self.__quantile(p)

        if quantile.shape != self.__shape:
            raise ValueError(
                f"The quantile function should return values of the same shape as "
                f"the random variable, i.e. {self.__shape}, but it returned a "
                f"value with {quantile.shape}."
            )

        if quantile.dtype != self.__dtype:
            raise ValueError(
                f"The quantile function should return values of the same dtype as "
                f"the random variable, i.e. `{self.__dtype.name}`, but it returned "
                f"a value with dtype `{quantile.dtype.name}`."
            )

        return quantile

    def __getitem__(self, key: ArrayLikeGetitemArgType) -> "RandomVariable":
        return RandomVariable(
            shape=np.empty(shape=self.shape)[key].shape,
            dtype=self.dtype,
            random_state=_utils.derive_random_seed(self.random_state),
            sample=lambda size: self.sample(size)[key],
            mode=lambda: self.mode[key],
            mean=lambda: self.mean[key],
            var=lambda: self.var[key],
            std=lambda: self.std[key],
            entropy=lambda: self.entropy,
            as_value_type=self.__as_value_type,
        )

    def reshape(self, newshape: ShapeArgType) -> "RandomVariable":
        """Give a new shape to a random variable.

        Parameters
        ----------
        newshape : int or tuple of ints
            New shape for the random variable. It must be compatible with the
            original shape.

        Returns
        -------
        reshaped_rv : ``self`` with the new dimensions of ``shape``.
        """
        newshape = _utils.as_shape(newshape)

        return RandomVariable(
            shape=newshape,
            dtype=self.dtype,
            random_state=_utils.derive_random_seed(self.random_state),
            sample=lambda size: self.sample(size).reshape(size + newshape),
            mode=lambda: self.mode.reshape(newshape),
            median=lambda: self.median.reshape(newshape),
            mean=lambda: self.mean.reshape(newshape),
            cov=lambda: self.cov,
            var=lambda: self.var.reshape(newshape),
            std=lambda: self.std.reshape(newshape),
            entropy=lambda: self.entropy,
            as_value_type=self.__as_value_type,
        )

    def transpose(self, *axes: int) -> "RandomVariable":
        """Transpose the random variable.

        Parameters
        ----------
        axes : None, tuple of ints, or n ints
            See documentation of numpy.ndarray.transpose.

        Returns
        -------
        transposed_rv : The transposed random variable.
        """
        return RandomVariable(
            shape=np.empty(shape=self.shape).transpose(*axes).shape,
            dtype=self.dtype,
            random_state=_utils.derive_random_seed(self.random_state),
            sample=lambda size: self.sample(size).transpose(*axes),
            mode=lambda: self.mode.transpose(*axes),
            median=lambda: self.median.transpose(*axes),
            mean=lambda: self.mean.transpose(*axes),
            cov=lambda: self.cov,
            var=lambda: self.var.transpose(*axes),
            std=lambda: self.std.transpose(*axes),
            entropy=lambda: self.entropy,
            as_value_type=self.__as_value_type,
        )

    T = property(transpose)
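    # Worked example for the quantile/median distinction documented above
    # (hypothetical discrete variable, not part of the class): for a uniform
    # random variable on {1, 2, 3, 4} with integer dtype,
    #
    #     rv.quantile(0.5)  # -> 2    (same integer dtype as the variable)
    #     rv.median         # -> 2.5  (median_dtype, i.e. a float)
    #
    # since Q(0.5) = inf{x : 0.5 <= F_X(x)} = 2, while the median convention
    # used by this class averages the two central values.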
\"\"\" if self.__mean", "not None: cdf = np.exp(self.logcdf(self._as_value_type(x))) assert isinstance(cdf, np.float_) return cdf", "nor the `pdf` of the continuous random variable \" f\"object", "function should return values of the same shape as the", "str: return f\"<{self.shape} {self.__class__.__name__} with dtype={self.dtype}>\" @property def shape(self) ->", "std=std, entropy=entropy, ) def pmf(self, x: _ValueType) -> np.float_: if", "median : float The median of the distribution. \"\"\" if", "# pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import add return add(other, self)", ") elif self.__logcdf is not None: cdf = np.exp(self.logcdf(self._as_value_type(x))) assert", "\"\"\" # pylint: disable=line-too-long if self.__cov is None: raise NotImplementedError", "logpdf = np.log(self.__pdf(self._as_value_type(x))) assert isinstance(logpdf, np.float_) return logpdf else: raise", "def logpmf(self, x: _ValueType) -> np.float_: if self.__logpmf is not", "def in_support(self, x: _ValueType) -> bool: if self.__in_support is None:", "random variables used by methods in ProbNum have Dirac or", "function. The shape of this argument should be :code:`(..., S1,", "-> RandomStateType: \"\"\"Random state of the random variable. This attribute", "have the correct \" f\"shape. Expected {shape} but got {value.shape}.\"", "x : array-like Evaluation points of the log-probability density/mass function.", "type \" f\"{type(value)} is not scalar.\" ) assert isinstance(value, (np.float_,", "pass Python :class:`float`. Then ``as_value_type`` should be set to something", "prior distribution as input and outputs a random variable whose", "\"\"\" if self.__mode is None: raise NotImplementedError mode = self.__mode()", "np.float_) def _as_value_type(self, x: Any) -> _ValueType: if self.__as_value_type is", "Parameters ---------- x : array-like Evaluation points of the cumulative", "bool]] = None, pdf: Optional[Callable[[_ValueType], np.float_]] = None, logpdf: Optional[Callable[[_ValueType],", "std=lambda: self.std, as_value_type=self.__as_value_type, ) def __abs__(self) -> \"RandomVariable\": return RandomVariable(", "if (``log``)``cdf`` and (``log``)``pdf`` both only work on :class:`np.float_` arguments,", "this argument should be :code:`(..., S1, ..., SN)`, where :code:`(S1,", "self.__cov() RandomVariable._check_property_value( \"covariance\", cov, shape=(self.size, self.size) if self.ndim > 0", "Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import truediv", "mod return mod(self, other) def __rmod__(self, other: Any) -> \"RandomVariable\":", "internally to transform the argument of functions like ``in_support``, ``cdf``", "\"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import sub return sub(self,", "cov=lambda: self.cov, var=lambda: self.var.transpose(*axes), std=lambda: self.std.transpose(*axes), entropy=lambda: self.entropy, as_value_type=self.__as_value_type, )", "but a RandomVariable with the correct shape is returned. \"\"\"", "\"\"\" Parameters of the probability distribution. The parameters of the", "return mul(other, self) def __matmul__(self, other: Any) -> \"RandomVariable\": #", "logp : array-like Value of the log-probability density / mass", "floordiv(self, other) def __rfloordiv__(self, other: Any) -> \"RandomVariable\": # pylint:", ": array-like Evaluation points of the cumulative distribution function. 
The", "import divmod_ return divmod_(other, self) def __pow__(self, other: Any) ->", "self.__logpdf = logpdf super().__init__( shape=shape, dtype=dtype, random_state=random_state, parameters=parameters, sample=sample, in_support=in_support,", "the main in- and outputs of probabilistic numerical methods. \"\"\"", "= np.array([1, 1]) + RV to call the arithmetic operations", "of a moment as a sum or an integral over", "of the probability density / mass function at the given", "on :class:`np.float_` arguments, but we still want the user to", "(or np.random), the global np.random state is used. If integer,", "def mean(self) -> _ValueType: \"\"\" Mean :math:`\\\\mathbb{E}(X)` of the distribution.", "Dict[str, Any]: \"\"\" Parameters of the probability distribution. The parameters", "for more details and examples. \"\"\" if self.__shape != ():", "SN)`, where :code:`(S1, ..., SN)` is the :attr:`shape` of the", "dimensions. Returns ------- p : array-like Value of the probability", "pylint: disable=too-many-arguments,too-many-locals \"\"\"Create a new random variable.\"\"\" self.__shape = _utils.as_shape(shape)", "variables, the :attr:`median` might lie in between two values in", "et cetera stored in a ``dict``. \"\"\" return self.__parameters.copy() @cached_property", "the global np.random state is used. If integer, it is", "of the cumulative density function at the given points. \"\"\"", "(in :class:`ContinuousRandomVariable`), and potentially by similar functions in subclasses. For", "random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: self.sample(size)[key], mode=lambda: self.mode[key], mean=lambda: self.mean[key], var=lambda: self.var[key],", "given points. \"\"\" if self.__pdf is not None: return ContinuousRandomVariable._ensure_numpy_float(", "an easy-to-process, normalized format. Will be called internally to transform", "`{self.__dtype.name}`, but it returned a value \" f\"with dtype `{quantile.dtype.name}`.\"", "self.entropy, as_value_type=self.__as_value_type, ) T = property(transpose) # Unary arithmetic operations", "RandomVariable._check_property_value( \"mode\", mode, shape=self.__shape, dtype=self.__dtype, ) # Make immutable if", "-self.mean, cov=lambda: self.cov, var=lambda: self.var, std=lambda: self.std, as_value_type=self.__as_value_type, ) def", "if not np.issubdtype(value.dtype, dtype): raise ValueError( f\"The {name} of the", "of the random variable, e.g. 
its :attr:`mean`, :attr:`cov`, :attr:`var`, or", "This can be either None or an existing RandomState object.", ": array-like Evaluation points of the probability density / mass", "self) def __mod__(self, other: Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import", "= _utils.as_shape(newshape) return RandomVariable( shape=newshape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: self.sample(size).reshape(size", "# pylint: disable=too-many-instance-attributes,too-many-public-methods def __init__( self, shape: ShapeArgType, dtype: DTypeArgType,", "\" f\"`{cls.__name__}` must return a scalar value that can be", "def var(self) -> _ValueType: \"\"\" Variance :math:`\\\\operatorname{Var}(X) = \\\\mathbb{E}((X-\\\\mathbb{E}(X))^2)` of", "var.setflags(write=False) return var @cached_property def std(self) -> _ValueType: \"\"\" Standard", "takes a random variable encoding the prior distribution as input", "disable=import-outside-toplevel,cyclic-import from ._arithmetic import matmul return matmul(self, other) def __rmatmul__(self,", "see :attr:`median_dtype`. Returns ------- median : float The median of", "var @cached_property def std(self) -> _ValueType: \"\"\" Standard deviation of", "pdf self.__logpdf = logpdf super().__init__( shape=shape, dtype=dtype, random_state=random_state, parameters=parameters, sample=sample,", "operators. This may change their ``distribution`` and not necessarily all", "will be set to the dtype arising from the multiplication", "= self.__in_support(self._as_value_type(x)) if not isinstance(in_support, bool): raise ValueError( f\"The function", "return matmul(self, other) def __rmatmul__(self, other: Any) -> \"RandomVariable\": #", "the uncertainty arising from finite computation. The generic signature of", "self.__quantile = quantile # Properties of the random variable self.__mode", "f\"object with type `{type(self).__name__}` is implemented.\" ) class ContinuousRandomVariable(RandomVariable[_ValueType]): def", "argument should be :code:`(..., S1, ..., SN)`, where :code:`(S1, ...,", "= np.asarray(value, dtype=np.float_) except TypeError as err: raise TypeError( f\"The", "from this random variable. If None (or np.random), the global", "@property def median_dtype(self) -> np.dtype: \"\"\"The dtype of the :attr:`median`.", "self.sample(size)[key], mode=lambda: self.mode[key], mean=lambda: self.mean[key], var=lambda: self.var[key], std=lambda: self.std[key], entropy=lambda:", "used to transform user-supplied arguments, interpreted as realizations of this", "std=lambda: self.std, as_value_type=self.__as_value_type, ) def __pos__(self) -> \"RandomVariable\": return RandomVariable(", "RandomVariable( shape=self.shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: -self.sample(size=size), in_support=lambda x: self.in_support(-x),", "> 0 else (), dtype=self.__moment_dtype, ) # Make immutable if", "matmul return matmul(other, self) def __truediv__(self, other: Any) -> \"RandomVariable\":", "not necessarily all previously available methods are retained. 
The internals", "self.std[key], entropy=lambda: self.entropy, as_value_type=self.__as_value_type, ) def reshape(self, newshape: ShapeArgType) ->", "Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import pow_", "immutable if isinstance(std, np.ndarray): std.setflags(write=False) return std @cached_property def entropy(self)", "function always returns values of the same dtype as the", "should be set to something like ``lambda x: np.float64(x)``. See", "got {value.shape}.\" ) if dtype is not None: if not", "ShapeArgType = ()) -> _ValueType: \"\"\" Draw realizations from a", "not None: if value.shape != shape: raise ValueError( f\"The {name}", "raise NotImplementedError entropy = self.__entropy() entropy = RandomVariable._ensure_numpy_float( \"entropy\", entropy,", "still want the user to be able to pass Python", "..., SN)` is the :attr:`shape` of the random variable. The", "NotImplementedError mean = self.__mean() RandomVariable._check_property_value( \"mean\", mean, shape=self.__shape, dtype=self.__moment_dtype, )", "self.__logcdf = logcdf self.__quantile = quantile # Properties of the", "the given points. \"\"\" if self.__logcdf is not None: return", "self.__shape != (): raise NotImplementedError( \"The quantile function is only", "self.__entropy is None: raise NotImplementedError entropy = self.__entropy() entropy =", "_ValueType) -> np.float_: \"\"\" Natural logarithm of the probability density", "RandomState singleton used by np.random. If already a RandomState instance,", "from ._arithmetic import add return add(other, self) def __sub__(self, other:", "@classmethod def _ensure_numpy_float( cls, name: str, value: Any, force_scalar: bool", "None: return ContinuousRandomVariable._ensure_numpy_float( \"logpdf\", self.__logpdf(self._as_value_type(x)) ) elif self.__pdf is not", "be used to transform user-supplied arguments, interpreted as realizations of", "`cdf` of the random variable object \" f\"with type `{type(self).__name__}`", "integer, it is used to seed the local :class:`~numpy.random.RandomState` instance.", "to call the arithmetic operations defined by RandomVariable instead of", "distribution. \"\"\" if self.__std is None: try: std = np.sqrt(self.var)", "numerical method takes a random variable encoding the prior distribution", ":class:`RandomVariable` or any of its descendants. Parameters ---------- shape :", "used by np.random. If already a RandomState instance, use it.", ") try: # functools.cached_property is only available in Python >=3.8", "\"\"\" if self.__mean is None: raise NotImplementedError mean = self.__mean()", ":attr:`shape` of the random variable. The logcdf evaluation will be", "practice, most random variables used by methods in ProbNum have", ":attr:`dtype` and :class:`np.float_`. This is motivated by the fact that,", "-> \"RandomVariable\": \"\"\" Give a new shape to a random", "RandomVariable._check_property_value( \"mean\", mean, shape=self.__shape, dtype=self.__moment_dtype, ) # Make immutable if", "-> np.float_: \"\"\" Log-cumulative distribution function. Parameters ---------- x :", "__rsub__(self, other: Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic", "as exc: raise TypeError( \"The given argument `p` can not", "mass function. The shape of this argument should be :code:`(...,", ":attr:`moment_dtype`. 
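    # Illustrative sketch of the effect of `__array_ufunc__ = None` defined
    # above (hypothetical `rv` with shape (2,), not part of the class):
    #
    #     y = np.array([1, 1]) + rv
    #
    # Without the attribute, numpy would apply `+` elementwise and return an
    # object array of two RandomVariables; with it, numpy returns
    # NotImplemented and Python falls back to `rv.__radd__`, so `y` is a
    # single RandomVariable of shape (2,).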
Returns ------- var : array-like The variance of the", "the random variable self.__parameters = parameters.copy() if parameters is not", "at the given points. \"\"\" if self.__logcdf is not None:", "\\\\to [0, 1]` is the :meth:`cdf` of the random variable.", "pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import floordiv return floordiv(other, self) def", "__init__( self, shape: ShapeArgType, dtype: DTypeArgType, random_state: RandomStateArgType = None,", "distribution. To learn about the dtype of the mean, see", "either None or an existing RandomState object. If None (or", "NotImplementedError( f\"Neither the `cdf` nor the `logcdf` of the random", "correct \" f\"shape. Expected {shape} but got {value.shape}.\" ) if", "with the new dimensions of ``shape``. \"\"\" newshape = _utils.as_shape(newshape)", "isinstance(cov, np.ndarray): cov.setflags(write=False) return cov @cached_property def var(self) -> _ValueType:", "but got {value.shape}.\" ) if dtype is not None: if", "set the RandomState object of the underlying distribution. This can", "self.size) if self.ndim > 0 else (), dtype=self.__moment_dtype, ) #", "if parameters is not None else {} self.__sample = sample", "of a probabilistic numerical method is: ``output_rv = probnum_method(input_rv, method_params)``", "can be either None or an existing RandomState object. If", "var=lambda: self.var[key], std=lambda: self.std[key], entropy=lambda: self.entropy, as_value_type=self.__as_value_type, ) def reshape(self,", "of type {type(value)}.\" ) from err else: raise TypeError( f\"The", "self.std, as_value_type=self.__as_value_type, ) def __abs__(self) -> \"RandomVariable\": return RandomVariable( shape=self.shape,", "variable.\"\"\" return self.__dtype @property def median_dtype(self) -> np.dtype: \"\"\"The dtype", ": array-like The standard deviation of the distribution. \"\"\" if", "sub(self, other) def __rsub__(self, other: Any) -> \"RandomVariable\": # pylint:", "np.float_]] = None, ): # Probability density function self.__pdf =", "in between two values in which case these values are", "None: if value.shape != shape: raise ValueError( f\"The {name} of", "variable.\"\"\" return self.__shape @cached_property def ndim(self) -> int: return len(self.__shape)", ":class:`RandomVariable` (e.g. its mean, cov, sampling function, etc.) will result", "but it returned a value with \" f\"{quantile.shape}.\" ) if", "quantile: Optional[Callable[[FloatArgType], _ValueType]] = None, mode: Optional[Callable[[], _ValueType]] = None,", "arithmetic operations defined by RandomVariable instead of elementwise. Thus no", "-> Union[np.float_, np.ndarray]: if np.isscalar(value): if not isinstance(value, np.float_): try:", "pow_(other, self) @staticmethod def infer_median_dtype(value_dtype: DTypeArgType) -> np.dtype: return RandomVariable.infer_moment_dtype(value_dtype)", "the mathematical definition of a moment as a sum or", ":class:`np.float_` and :attr:`dtype`, respectively. \"\"\" return self.__moment_dtype @property def random_state(self)", "of a random variable :math:`X` is defined as :math:`Q(p) =", "if quantile.dtype != self.__dtype: raise ValueError( f\"The quantile function should", "Parameters ---------- size : tuple Size of the drawn sample", "density / mass function. 
The shape of this argument should", "from functools import cached_property except ImportError: from cached_property import cached_property", "\"RandomVariable\": return RandomVariable( shape=self.shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: +self.sample(size=size), in_support=lambda", "the \" f\"random variable, i.e. {self.__shape}, but it returned a", "new shape to a random variable. Parameters ---------- newshape :", "(function of a) moment of the random variable, e.g. its", ":class:`RandomVariable` objects are assumed to be constant over their whole", "self.__cov is None: raise NotImplementedError cov = self.__cov() RandomVariable._check_property_value( \"covariance\",", "@cached_property def var(self) -> _ValueType: \"\"\" Variance :math:`\\\\operatorname{Var}(X) = \\\\mathbb{E}((X-\\\\mathbb{E}(X))^2)`", ") return in_support def sample(self, size: ShapeArgType = ()) ->", "None: return ContinuousRandomVariable._ensure_numpy_float( \"pdf\", self.__pdf(self._as_value_type(x)) ) if self.__logpdf is not", "= self.__var() RandomVariable._check_property_value( \"variance\", var, shape=self.__shape, dtype=self.__moment_dtype, ) # Make", "-self.median, mean=lambda: -self.mean, cov=lambda: self.cov, var=lambda: self.var, std=lambda: self.std, as_value_type=self.__as_value_type,", "assert isinstance(pdf, np.float_) return pdf raise NotImplementedError( f\"Neither the `pdf`", "is due to the caches used to make certain computations", "pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import divmod_ return divmod_(self, other) def", "added, multiplied, etc. with arrays and linear operators. This may", "random_state=random_state, parameters=parameters, sample=sample, in_support=in_support, cdf=cdf, logcdf=logcdf, quantile=quantile, mode=mode, median=median, mean=mean,", "@cached_property def median(self) -> _ValueType: \"\"\" Median of the random", "variance of the distribution. \"\"\" if self.__var is None: try:", "the correct \" f\"shape. Expected {shape} but got {value.shape}.\" )", "Returns ------- logp : array-like Value of the log-probability density", "Random variables are the main objects used by probabilistic numerical", "a :class:`RandomVariable` (e.g. its mean, cov, sampling function, etc.) will", "= var self.__std = std self.__entropy = entropy # Utilities", "RandomVariable._ensure_numpy_float( \"entropy\", entropy, force_scalar=True ) return entropy def in_support(self, x:", "\"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import mul return mul(self,", "import sub return sub(self, other) def __rsub__(self, other: Any) ->", "constructor of \" f\"`{cls.__name__}` must return a value that can", "of the distribution. \"\"\" if self.__var is None: try: var", "None: return DiscreteRandomVariable._ensure_numpy_float( \"logpmf\", self.__logpmf(self._as_value_type(x)) ) elif self.__pmf is not", "import Any, Callable, Dict, Generic, Optional, Tuple, TypeVar, Union import", "\" f\"object with type `{type(self).__name__}` is implemented.\" ) def logpmf(self,", "def __rmatmul__(self, other: Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from", "new random variable.\"\"\" self.__shape = _utils.as_shape(shape) # Data Types self.__dtype", "also be integers. This means that, in general, :math:`Q(0.5)` is", "Parameters ---------- axes : None, tuple of ints, or n", "to an easy-to-process, normalized format. 
Will be called internally to", "as it is defined in this class. See https://en.wikipedia.org/wiki/Quantile_function for", "# Make immutable if isinstance(median, np.ndarray): median.setflags(write=False) return median @cached_property", "= self.__quantile(p) if quantile.shape != self.__shape: raise ValueError( f\"The quantile", "linear operators. This may change their ``distribution`` and not necessarily", "is not None: return RandomVariable._ensure_numpy_float( \"cdf\", self.__cdf(self._as_value_type(x)) ) elif self.__logcdf", "elif not force_scalar: try: value = np.asarray(value, dtype=np.float_) except TypeError", "2, 3, 4 \\\\}` will have a median of :math:`2.5`.", "be called internally to transform the argument of functions like", "dtypes :attr:`dtype` and :class:`np.float_`. This is motivated by the mathematical", "to seed the local :class:`~numpy.random.RandomState` instance. \"\"\" return self._random_state @random_state.setter", "return mod(other, self) def __divmod__(self, other: Any) -> \"RandomVariable\": #", "disable=import-outside-toplevel,cyclic-import from ._arithmetic import divmod_ return divmod_(self, other) def __rdivmod__(self,", "__matmul__(self, other: Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic", "import pow_ return pow_(self, other) def __rpow__(self, other: Any) ->", ":class:`np.float_`. This is motivated by the fact that, even for", "pmf self.__logpmf = logpmf super().__init__( shape=shape, dtype=dtype, random_state=random_state, parameters=parameters, sample=sample,", "= None, ): if shape is not None: if value.shape", "The cdf evaluation will be broadcast over all additional dimensions.", "uniform random variable on :math:`\\\\{ 1, 2, 3, 4 \\\\}`", "Union[np.float_, np.ndarray]: if np.isscalar(value): if not isinstance(value, np.float_): try: value", "`pmf` nor the `logpmf` of the discrete random variable \"", "if self.__mode is None: raise NotImplementedError mode = self.__mode() RandomVariable._check_property_value(", "exc quantile = self.__quantile(p) if quantile.shape != self.__shape: raise ValueError(", "a scalar value that can be \" f\"converted to a", "if self.__cov is None: raise NotImplementedError cov = self.__cov() RandomVariable._check_property_value(", "1] \\\\to \\\\mathbb{R}` of a random variable :math:`X` is defined", "np.random), use the RandomState singleton used by np.random. If already", "cached_property except ImportError: from cached_property import cached_property _ValueType = TypeVar(\"ValueType\")", "\"\"\" if self.__shape != (): raise NotImplementedError( \"The quantile function", "is implemented.\" ) class ContinuousRandomVariable(RandomVariable[_ValueType]): def __init__( self, shape: ShapeArgType,", "random_state: RandomStateArgType = None, parameters: Optional[Dict[str, Any]] = None, sample:", "be converted \" f\"to a `np.ndarray` of type `np.float_`, which", ") def reshape(self, newshape: ShapeArgType) -> \"RandomVariable\": \"\"\" Give a", "Optional[Callable[[], _ValueType]] = None, median: Optional[Callable[[], _ValueType]] = None, mean:", "= None, parameters: Optional[Dict[str, Any]] = None, sample: Optional[Callable[[ShapeArgType], _ValueType]]", "\"\"\" Standard deviation of the distribution. 
To learn about the", "-> np.float_: if self.__entropy is None: raise NotImplementedError entropy =", "or tuple of ints New shape for the random variable.", "self.__logcdf is not None: cdf = np.exp(self.logcdf(self._as_value_type(x))) assert isinstance(cdf, np.float_)", ":attr:`moment_dtype`. Returns ------- cov : array-like The kernels of the", "(or np.random), use the RandomState singleton used by np.random. If", "NotImplementedError( f\"Neither the `pdf` nor the `logpdf` of the continuous", "broadcast over all additional dimensions. Returns ------- logp : array-like", "F_X(x) \\\\}`, where :math:`F_X \\\\colon \\\\mathbb{R} \\\\to [0, 1]` is", "all additional dimensions. Returns ------- q : array-like Value of", "-> _ValueType: \"\"\" Standard deviation of the distribution. To learn", "------- cov : array-like The kernels of the random variable.", "be :code:`(..., S1, ..., SN)`, where :code:`(S1, ..., SN)` is", "NotImplementedError from exc else: var = self.__var() RandomVariable._check_property_value( \"variance\", var,", "`{type(self).__name__}` is implemented.\" ) def quantile(self, p: FloatArgType) -> _ValueType:", "# pylint: disable=line-too-long if self.__cov is None: raise NotImplementedError cov", "be kept in mind when subclassing :class:`RandomVariable` or any of", "defined for scalar random variables.\" ) median = self.__median() RandomVariable._check_property_value(", "`pdf` nor the `logpdf` of the continuous random variable \"", "{dtype.name} but got {value.dtype.name}.\" ) @classmethod def _ensure_numpy_float( cls, name:", "transform user-supplied arguments, interpreted as realizations of this random variable,", "internal state of a :class:`RandomVariable` (e.g. its mean, cov, sampling", "mul return mul(other, self) def __matmul__(self, other: Any) -> \"RandomVariable\":", "nor the `logcdf` of the random variable object \" f\"with", "mode(self) -> _ValueType: \"\"\" Mode of the random variable. Returns", "def _as_value_type(self, x: Any) -> _ValueType: if self.__as_value_type is not", "{} self.__sample = sample self.__in_support = in_support self.__cdf = cdf", "from exc else: std = self.__std() RandomVariable._check_property_value( \"standard deviation\", std,", "as_value_type=self.__as_value_type, ) def reshape(self, newshape: ShapeArgType) -> \"RandomVariable\": \"\"\" Give", "numpy from calling elementwise arithmetic operations allowing expressions like: y", "density function at the given points. \"\"\" if self.__cdf is", "of the discrete random variable \" f\"object with type `{type(self).__name__}`", "return RandomVariable( shape=np.empty(shape=self.shape)[key].shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: self.sample(size)[key], mode=lambda: self.mode[key],", "is not None: if value.shape != shape: raise ValueError( f\"The", "infer_moment_dtype(value_dtype: DTypeArgType) -> np.dtype: return np.promote_types(value_dtype, np.float_) def _as_value_type(self, x:", ":math:`X` is defined as :math:`Q(p) = \\\\inf\\\\{ x \\\\in \\\\mathbb{R}", "whose distribution encodes the uncertainty arising from finite computation. 
The", "= None, as_value_type: Optional[Callable[[Any], _ValueType]] = None, ): # pylint:", "= self.__median() RandomVariable._check_property_value( \"median\", median, shape=self.__shape, dtype=self.__median_dtype, ) # Make", "np.float_) return logpmf else: raise NotImplementedError( f\"Neither the `logpmf` nor", "\"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import matmul return matmul(other,", "log-cumulative density function at the given points. \"\"\" if self.__logcdf", "over the integers, the returned quantiles will also be integers.", "divmod_(self, other) def __rdivmod__(self, other: Any) -> \"RandomVariable\": # pylint:", "entropy: Optional[Callable[[], np.float_]] = None, ): # Probability mass function", "__truediv__(self, other: Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic", "if self.__pdf is not None: return ContinuousRandomVariable._ensure_numpy_float( \"pdf\", self.__pdf(self._as_value_type(x)) )", "Following the predominant convention in mathematics, we express pdfs with", "_ValueType: \"\"\" Covariance :math:`\\\\operatorname{Cov}(X) = \\\\mathbb{E}((X-\\\\mathbb{E}(X))(X-\\\\mathbb{E}(X))^\\\\top)` of the random variable.", "dtype=self.__median_dtype, ) # Make immutable if isinstance(median, np.ndarray): median.setflags(write=False) return", "NotImplementedError as exc: raise NotImplementedError from exc else: std =", "`logpdf` nor the `pdf` of the continuous random variable \"", "return RandomVariable( shape=newshape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: self.sample(size).reshape(size + newshape),", "._arithmetic import matmul return matmul(other, self) def __truediv__(self, other: Any)", "type `{type(self).__name__}` is implemented.\" ) def logpmf(self, x: _ValueType) ->", "Value of the cumulative density function at the given points.", "The shape of this argument should be :code:`(..., S1, ...,", "provided.\") return self.__sample(size=_utils.as_shape(size)) def cdf(self, x: _ValueType) -> np.float_: \"\"\"", "= None, cov: Optional[Callable[[], _ValueType]] = None, var: Optional[Callable[[], _ValueType]]", "= None, entropy: Optional[Callable[[], np.float_]] = None, as_value_type: Optional[Callable[[Any], _ValueType]]", "@property def shape(self) -> ShapeType: \"\"\"Shape of realizations of the", "the RandomState object to use for drawing realizations from this", "quantile = self.__quantile(p) if quantile.shape != self.__shape: raise ValueError( f\"The", "as a sum or an integral over products of probabilities", "value: Any, force_scalar: bool = False ) -> Union[np.float_, np.ndarray]:", "\\\\colon \\\\mathbb{R} \\\\to [0, 1]` is the :meth:`cdf` of the", "as_value_type=self.__as_value_type, ) def __abs__(self) -> \"RandomVariable\": return RandomVariable( shape=self.shape, dtype=self.dtype,", "= ()) -> _ValueType: \"\"\" Draw realizations from a random", "assert isinstance(logpdf, np.float_) return logpdf else: raise NotImplementedError( f\"Neither the", "always returns values of the same dtype as the random", "._arithmetic import sub return sub(other, self) def __mul__(self, other: Any)", "probabilities and values of the random variable, which are represented", "the argument of functions like ``in_support``, ``cdf`` and ``logcdf``, ``pmf``", "etc. with arrays and linear operators. This may change their", "The mean of the distribution. 
\"\"\" if self.__mean is None:", "method provided.\") return self.__sample(size=_utils.as_shape(size)) def cdf(self, x: _ValueType) -> np.float_:", "integers. This means that, in general, :math:`Q(0.5)` is not equal", ") from exc quantile = self.__quantile(p) if quantile.shape != self.__shape:", "truediv return truediv(other, self) def __floordiv__(self, other: Any) -> \"RandomVariable\":", "and (``log``)``pdf`` both only work on :class:`np.float_` arguments, but we", "None, ): if shape is not None: if value.shape !=", "mean=mean, cov=cov, var=var, std=std, entropy=entropy, ) def pmf(self, x: _ValueType)", "the user to be able to pass Python :class:`float`. Then", "-> str: return f\"<{self.shape} {self.__class__.__name__} with dtype={self.dtype}>\" @property def shape(self)", "f\"Neither the `logcdf` nor the `cdf` of the random variable", "https://en.wikipedia.org/wiki/Quantile_function for more details and examples. \"\"\" if self.__shape !=", "implemented.\" ) def logpdf(self, x: _ValueType) -> np.float_: \"\"\" Natural", "particular, this should be kept in mind when subclassing :class:`RandomVariable`", "logcdf evaluation will be broadcast over all additional dimensions. Returns", "asrandvar : Transform into a :class:`RandomVariable`. Examples -------- \"\"\" #", "Optional[Callable[[], _ValueType]] = None, entropy: Optional[Callable[[], np.float_]] = None, ):", "implemented.\" ) def logpmf(self, x: _ValueType) -> np.float_: if self.__logpmf", "self.__sample(size=_utils.as_shape(size)) def cdf(self, x: _ValueType) -> np.float_: \"\"\" Cumulative distribution", "RandomVariable( shape=self.shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: +self.sample(size=size), in_support=lambda x: self.in_support(+x),", "The kernels of the random variable. \"\"\" # pylint: disable=line-too-long", "= None, var: Optional[Callable[[], _ValueType]] = None, std: Optional[Callable[[], _ValueType]]", "object \" f\"with type `{type(self).__name__}` is implemented.\" ) def quantile(self,", "be added, multiplied, etc. with arrays and linear operators. This", "be \" f\"converted to a `np.float_`, which is not possible", "# pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import truediv return truediv(other, self)", "def __matmul__(self, other: Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from", ":attr:`median` as it is defined in this class. See https://en.wikipedia.org/wiki/Quantile_function", "It will be set to the dtype arising from the", "the \" f\"random variable, i.e. `{self.__dtype.name}`, but it returned a", "if self.__sample is None: raise NotImplementedError(\"No sampling method provided.\") return", "\"\"\" Transpose the random variable. Parameters ---------- axes : None,", "and examples. \"\"\" if self.__shape != (): raise NotImplementedError( \"The", "int: return int(np.prod(self.__shape)) @property def dtype(self) -> np.dtype: \"\"\"Data type", "bool): raise ValueError( f\"The function `in_support` must return a `bool`,", "assert isinstance(value, (np.float_, np.ndarray)) return value class DiscreteRandomVariable(RandomVariable[_ValueType]): def __init__(", "in mathematics, we express pdfs with respect to the Lebesgue", "self.mean[key], var=lambda: self.var[key], std=lambda: self.std[key], entropy=lambda: self.entropy, as_value_type=self.__as_value_type, ) def", "is defined in this class. 
    def cdf(self, x: _ValueType) -> np.float_:
        """
        Cumulative distribution function.

        Parameters
        ----------
        x : array-like
            Evaluation points of the cumulative distribution function. The shape
            of this argument should be :code:`(..., S1, ..., SN)`, where
            :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable. The
            cdf evaluation will be broadcast over all additional dimensions.

        Returns
        -------
        q : array-like
            Value of the cumulative density function at the given points.
        """
        if self.__cdf is not None:
            return RandomVariable._ensure_numpy_float(
                "cdf", self.__cdf(self._as_value_type(x))
            )
        elif self.__logcdf is not None:
            cdf = np.exp(self.logcdf(self._as_value_type(x)))

            assert isinstance(cdf, np.float_)

            return cdf
        else:
            raise NotImplementedError(
                f"Neither the `cdf` nor the `logcdf` of the random variable object "
                f"with type `{type(self).__name__}` is implemented."
            )

    def logcdf(self, x: _ValueType) -> np.float_:
        """
        Log-cumulative distribution function.

        Parameters
        ----------
        x : array-like
            Evaluation points of the cumulative distribution function. The shape
            of this argument should be :code:`(..., S1, ..., SN)`, where
            :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable. The
            logcdf evaluation will be broadcast over all additional dimensions.

        Returns
        -------
        q : array-like
            Value of the log-cumulative density function at the given points.
        """
        if self.__logcdf is not None:
            return RandomVariable._ensure_numpy_float(
                "logcdf", self.__logcdf(self._as_value_type(x))
            )
        elif self.__cdf is not None:
            logcdf = np.log(self.__cdf(self._as_value_type(x)))

            assert isinstance(logcdf, np.float_)

            return logcdf
        else:
            raise NotImplementedError(
                f"Neither the `logcdf` nor the `cdf` of the random variable object "
                f"with type `{type(self).__name__}` is implemented."
            )
    def quantile(self, p: FloatArgType) -> _ValueType:
        """Quantile function.

        The quantile function :math:`Q \\colon [0, 1] \\to \\mathbb{R}` of a
        random variable :math:`X` is defined as
        :math:`Q(p) = \\inf\\{ x \\in \\mathbb{R} \\colon p \\le F_X(x) \\}`, where
        :math:`F_X \\colon \\mathbb{R} \\to [0, 1]` is the :meth:`cdf` of the
        random variable. From this definition it follows that the quantile
        function always returns values of the same dtype as the random variable.
        For instance, for a discrete distribution over the integers, the returned
        quantiles will also be integers. This means that, in general,
        :math:`Q(0.5)` is not equal to the :attr:`median` as it is defined in this
        class. See https://en.wikipedia.org/wiki/Quantile_function for more
        details and examples.
        """
        if self.__shape != ():
            raise NotImplementedError(
                "The quantile function is only defined for scalar random variables."
            )

        if self.__quantile is None:
            raise NotImplementedError

        try:
            p = _utils.as_numpy_scalar(p, dtype=np.floating)
        except TypeError as exc:
            raise TypeError(
                "The given argument `p` can not be cast to a `np.floating` object."
            ) from exc

        quantile = self.__quantile(p)

        if quantile.shape != self.__shape:
            raise ValueError(
                f"The quantile function should return values of the same shape as "
                f"the random variable, i.e. {self.__shape}, but it returned a "
                f"value with shape {quantile.shape}."
            )

        if quantile.dtype != self.__dtype:
            raise ValueError(
                f"The quantile function should return values of the same dtype as "
                f"the random variable, i.e. `{self.__dtype.name}`, but it returned "
                f"a value with dtype `{quantile.dtype.name}`."
            )

        return quantile
    def __getitem__(self, key: ArrayLikeGetitemArgType) -> "RandomVariable":
        return RandomVariable(
            shape=np.empty(shape=self.shape)[key].shape,
            dtype=self.dtype,
            random_state=_utils.derive_random_seed(self.random_state),
            sample=lambda size: self.sample(size)[key],
            mode=lambda: self.mode[key],
            mean=lambda: self.mean[key],
            var=lambda: self.var[key],
            std=lambda: self.std[key],
            entropy=lambda: self.entropy,
            as_value_type=self.__as_value_type,
        )
\"\"\" if", "np.float_) return logcdf else: raise NotImplementedError( f\"Neither the `logcdf` nor", "mean=lambda: +self.mean, cov=lambda: self.cov, var=lambda: self.var, std=lambda: self.std, as_value_type=self.__as_value_type, )", "sample : array-like Sample of realizations with the given ``size``", "\"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import matmul return matmul(self,", ": Function which can be used to transform user-supplied arguments,", "# Probability density function self.__pdf = pdf self.__logpdf = logpdf", "return ContinuousRandomVariable._ensure_numpy_float( \"logpdf\", self.__logpdf(self._as_value_type(x)) ) elif self.__pdf is not None:", ":math:`Q \\\\colon [0, 1] \\\\to \\\\mathbb{R}` of a random variable", "\" f\"dtype. Expected {dtype.name} but got {value.dtype.name}.\" ) @classmethod def", "ShapeArgType) -> \"RandomVariable\": \"\"\" Give a new shape to a", "a uniform random variable on :math:`\\\\{ 1, 2, 3, 4", "is defined as :math:`Q(p) = \\\\inf\\\\{ x \\\\in \\\\mathbb{R} \\\\colon", "None, mode: Optional[Callable[[], _ValueType]] = None, median: Optional[Callable[[], _ValueType]] =", "value with \" f\"{quantile.shape}.\" ) if quantile.dtype != self.__dtype: raise", "return int(np.prod(self.__shape)) @property def dtype(self) -> np.dtype: \"\"\"Data type of", "matmul(other, self) def __truediv__(self, other: Any) -> \"RandomVariable\": # pylint:", "points of the probability density / mass function. The shape", "import cached_property _ValueType = TypeVar(\"ValueType\") class RandomVariable(Generic[_ValueType]): \"\"\" Random variables", "To learn about the dtype of the variance, see :attr:`moment_dtype`.", ") # Make immutable if isinstance(mean, np.ndarray): mean.setflags(write=False) return mean", "isinstance(std, np.ndarray): std.setflags(write=False) return std @cached_property def entropy(self) -> np.float_:", "via the constructor of \" f\"`{cls.__name__}` must return a scalar", "will result in undefined behavior. In particular, this should be", "return f\"<{self.shape} {self.__class__.__name__} with dtype={self.dtype}>\" @property def shape(self) -> ShapeType:", "not None: return ContinuousRandomVariable._ensure_numpy_float( \"logpdf\", self.__logpdf(self._as_value_type(x)) ) elif self.__pdf is", "\"\"\" if self.__logpdf is not None: return ContinuousRandomVariable._ensure_numpy_float( \"logpdf\", self.__logpdf(self._as_value_type(x))", "be converted to ``numpy.dtype``. as_value_type : Function which can be", "self.__parameters = parameters.copy() if parameters is not None else {}", "@cached_property def ndim(self) -> int: return len(self.__shape) @cached_property def size(self)", "{value} of type \" f\"{type(value)} is not scalar.\" ) assert", "f\"The {name} of the random variable does not have the", "sub return sub(other, self) def __mul__(self, other: Any) -> \"RandomVariable\":", "a moment as a sum or an integral over products", "= self.__mode() RandomVariable._check_property_value( \"mode\", mode, shape=self.__shape, dtype=self.__dtype, ) # Make", "np.float64(x)``. See Also -------- asrandvar : Transform into a :class:`RandomVariable`.", "quantile=quantile, mode=mode, median=median, mean=mean, cov=cov, var=var, std=std, entropy=entropy, ) def", "Transpose the random variable. 
    def transpose(self, *axes: int) -> "RandomVariable":
        """
        Transpose the random variable.

        Parameters
        ----------
        axes : None, tuple of ints, or n ints
            See documentation of numpy.ndarray.transpose.

        Returns
        -------
        transposed_rv : The transposed random variable.
        """
        return RandomVariable(
            shape=np.empty(shape=self.shape).transpose(*axes).shape,
            dtype=self.dtype,
            random_state=_utils.derive_random_seed(self.random_state),
            sample=lambda size: self.sample(size).transpose(*axes),
            mode=lambda: self.mode.transpose(*axes),
            median=lambda: self.median.transpose(*axes),
            mean=lambda: self.mean.transpose(*axes),
            cov=lambda: self.cov,
            var=lambda: self.var.transpose(*axes),
            std=lambda: self.std.transpose(*axes),
            entropy=lambda: self.entropy,
            as_value_type=self.__as_value_type,
        )

    T = property(transpose)
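    # Illustration (not part of the class API): `reshape` and `transpose` return
    # new `RandomVariable` objects whose sampling and moment callables wrap the
    # originals, e.g.
    #
    #   rv_2x3 = rv_6.reshape((2, 3))  # hypothetical shape-(6,) variable
    #   rv_3x2 = rv_2x3.T              # equivalent to rv_2x3.transpose()
    #
    # The random state is derived via `_utils.derive_random_seed`, not shared, so
    # the new variable draws reproducibly seeded but distinct samples.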
"np.float_) return logpdf else: raise NotImplementedError( f\"Neither the `logpdf` nor", "the distribution. \"\"\" if self.__var is None: try: var =", "------- var : array-like The variance of the distribution. \"\"\"", "the same dtype as the \" f\"random variable, i.e. `{self.__dtype.name}`,", "scalar value that can be \" f\"converted to a `np.float_`,", "the random variable. \"\"\" if self.__mode is None: raise NotImplementedError", "return median @cached_property def mean(self) -> _ValueType: \"\"\" Mean :math:`\\\\mathbb{E}(X)`", "\" f\"with type `{type(self).__name__}` is implemented.\" ) def logcdf(self, x:", "# pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import matmul return matmul(self, other)", "return mul(self, other) def __rmul__(self, other: Any) -> \"RandomVariable\": #", "self) @staticmethod def infer_median_dtype(value_dtype: DTypeArgType) -> np.dtype: return RandomVariable.infer_moment_dtype(value_dtype) @staticmethod", "._arithmetic import mul return mul(other, self) def __matmul__(self, other: Any)", "-> \"RandomVariable\": return RandomVariable( shape=self.shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: abs(self.sample(size=size)),", "# pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import matmul return matmul(other, self)", "---------- x : array-like Evaluation points of the log-probability density/mass", "np.float_): try: value = _utils.as_numpy_scalar(value, dtype=np.float_) except TypeError as err:", "variable. The logcdf evaluation will be broadcast over all additional", "None, ): # Probability mass function self.__pmf = pmf self.__logpmf", "std = self.__std() RandomVariable._check_property_value( \"standard deviation\", std, shape=self.__shape, dtype=self.__moment_dtype, )", "_utils.as_shape(shape) # Data Types self.__dtype = np.dtype(dtype) self.__median_dtype = RandomVariable.infer_median_dtype(self.__dtype)", "(): raise NotImplementedError( \"The median is only defined for scalar", "ArrayLikeGetitemArgType) -> \"RandomVariable\": return RandomVariable( shape=np.empty(shape=self.shape)[key].shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size:", "pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import mod return mod(self, other) def", ") if self.__quantile is None: raise NotImplementedError try: p =", "-> _ValueType: \"\"\"Quantile function. The quantile function :math:`Q \\\\colon [0,", "the internal state of a :class:`RandomVariable` (e.g. its mean, cov,", "for \" f\"{value} of type {type(value)}.\" ) from err elif", "None, tuple of ints, or n ints See documentation of", "= None, std: Optional[Callable[[], _ValueType]] = None, entropy: Optional[Callable[[], np.float_]]", "\"logpmf\", self.__logpmf(self._as_value_type(x)) ) elif self.__pmf is not None: logpmf =", "\"\"\" Log-cumulative distribution function. Parameters ---------- x : array-like Evaluation", ": Shape of realizations of this random variable. dtype :", "mean self.__cov = cov self.__var = var self.__std = std", "/ mass function at the given points. \"\"\" if self.__pdf", "RandomVariable._check_property_value( \"median\", median, shape=self.__shape, dtype=self.__median_dtype, ) # Make immutable if", "seed. 
\"\"\" self._random_state = _utils.as_random_state(seed) @property def parameters(self) -> Dict[str,", "np.dtype: return RandomVariable.infer_moment_dtype(value_dtype) @staticmethod def infer_moment_dtype(value_dtype: DTypeArgType) -> np.dtype: return", "the integers, the returned quantiles will also be integers. This", "the random variable. It must be compatible with the original", "Callable, Dict, Generic, Optional, Tuple, TypeVar, Union import numpy as", "of probabilistic numerical methods. \"\"\" from typing import Any, Callable,", "def __repr__(self) -> str: return f\"<{self.shape} {self.__class__.__name__} with dtype={self.dtype}>\" @property", "\"\"\" return self._random_state @random_state.setter def random_state(self, seed: RandomStateArgType): \"\"\"Get or", "is None: try: std = np.sqrt(self.var) except NotImplementedError as exc:", "the log-probability density/mass function. The shape of this argument should", "to a random variable. Parameters ---------- newshape : int or", "express pdfs with respect to the Lebesgue measure unless stated", "shape=self.__shape, dtype=self.__moment_dtype, ) # Make immutable if isinstance(mean, np.ndarray): mean.setflags(write=False)", "f\"random variable, i.e. {self.__shape}, but it returned a value with", ":class:`~numpy.random.RandomState` instance. \"\"\" return self._random_state @random_state.setter def random_state(self, seed: RandomStateArgType):", "is None: raise NotImplementedError mode = self.__mode() RandomVariable._check_property_value( \"mode\", mode,", "# Make immutable if isinstance(mean, np.ndarray): mean.setflags(write=False) return mean @cached_property", "cov=lambda: self.cov, var=lambda: self.var.reshape(newshape), std=lambda: self.std.reshape(newshape), entropy=lambda: self.entropy, as_value_type=self.__as_value_type, )", "\"RandomVariable\": return RandomVariable( shape=self.shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: -self.sample(size=size), in_support=lambda", "is not possible \" f\"for {value} of type {type(value)}.\" )", "return pmf else: raise NotImplementedError( f\"Neither the `pmf` nor the", "RandomVariable(Generic[_ValueType]): \"\"\" Random variables are the main objects used by", "of this random variable, to an easy-to-process, normalized format. Will", "is implemented.\" ) def logpdf(self, x: _ValueType) -> np.float_: \"\"\"", "TypeError as exc: raise TypeError( \"The given argument `p` can", "`np.float_`, which is not possible \" f\"for {value} of type", "`{type(self).__name__}` is implemented.\" ) def logcdf(self, x: _ValueType) -> np.float_:", "a value that can be converted \" f\"to a `np.ndarray`", "class ContinuousRandomVariable(RandomVariable[_ValueType]): def __init__( self, shape: ShapeArgType, dtype: DTypeArgType, random_state:", "as mean, variance, et cetera stored in a ``dict``. \"\"\"", "reshaped_rv : ``self`` with the new dimensions of ``shape``. 
\"\"\"", "from ._arithmetic import matmul return matmul(other, self) def __truediv__(self, other:", "mode=mode, median=median, mean=mean, cov=cov, var=var, std=std, entropy=entropy, ) def pmf(self,", "is implemented.\" ) def logpmf(self, x: _ValueType) -> np.float_: if", "disable=import-outside-toplevel,cyclic-import from ._arithmetic import add return add(other, self) def __sub__(self,", "functools.cached_property is only available in Python >=3.8 from functools import", "To learn about the dtype of the median, see :attr:`median_dtype`.", "_ValueType]] = None, entropy: Optional[Callable[[], np.float_]] = None, as_value_type: Optional[Callable[[Any],", "__rmul__(self, other: Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic", "from exc quantile = self.__quantile(p) if quantile.shape != self.__shape: raise", "of a :class:`RandomVariable` (e.g. its mean, cov, sampling function, etc.)", "f\"converted to a `np.float_`, which is not possible for \"", "\"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import add return add(self,", "cov=cov, var=var, std=std, entropy=entropy, ) def pmf(self, x: _ValueType) ->", "isinstance(mode, np.ndarray): mode.setflags(write=False) return mode @cached_property def median(self) -> _ValueType:", "``in_support``, ``cdf`` and ``logcdf``, ``pmf`` and ``logpmf`` (in :class:`DiscreteRandomVariable`), ``pdf``", "np.float_]] = None, cdf: Optional[Callable[[_ValueType], np.float_]] = None, logcdf: Optional[Callable[[_ValueType],", "dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: self.sample(size)[key], mode=lambda: self.mode[key], mean=lambda: self.mean[key], var=lambda:", "variable self.__parameters = parameters.copy() if parameters is not None else", "distribution. This can be either None or an existing RandomState", "): # Probability density function self.__pdf = pdf self.__logpdf =", "mode of the random variable. \"\"\" if self.__mode is None:", "elementwise arithmetic operations allowing expressions like: y = np.array([1, 1])", "_utils.as_random_state(seed) @property def parameters(self) -> Dict[str, Any]: \"\"\" Parameters of", "cached_property import cached_property _ValueType = TypeVar(\"ValueType\") class RandomVariable(Generic[_ValueType]): \"\"\" Random", "the `logcdf` of the random variable object \" f\"with type", "integers, the returned quantiles will also be integers. This means", ") if self.__logpdf is not None: pdf = np.exp(self.__logpdf(self._as_value_type(x))) assert", ":class:`np.float_` arguments, but we still want the user to be", "the continuous random variable \" f\"object with type `{type(self).__name__}` is", "The logcdf evaluation will be broadcast over all additional dimensions.", "pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import floordiv return floordiv(self, other) def", "of the median, see :attr:`median_dtype`. Returns ------- median : float", "methods in ProbNum have Dirac or Gaussian measure. Instances of", "the random variable. 
This attribute defines the RandomState object to", "return self._random_state @random_state.setter def random_state(self, seed: RandomStateArgType): \"\"\"Get or set", "import cached_property except ImportError: from cached_property import cached_property _ValueType =", "In practice, most random variables used by methods in ProbNum", "that the quantile function always returns values of the same", "self.__logpmf(self._as_value_type(x)) ) elif self.__pmf is not None: logpmf = np.log(self.__pmf(self._as_value_type(x)))", "\"RandomVariable\": \"\"\" Transpose the random variable. Parameters ---------- axes :", "``cdf`` and ``logcdf``, ``pmf`` and ``logpmf`` (in :class:`DiscreteRandomVariable`), ``pdf`` and", "immutable if isinstance(median, np.ndarray): median.setflags(write=False) return median @cached_property def mean(self)", "._arithmetic import floordiv return floordiv(self, other) def __rfloordiv__(self, other: Any)", "will be broadcast over all additional dimensions. Returns ------- q", "return logpdf else: raise NotImplementedError( f\"Neither the `logpdf` nor the", "add return add(self, other) def __radd__(self, other: Any) -> \"RandomVariable\":", "distribution. To learn about the dtype of the standard deviation,", "Dict, Generic, Optional, Tuple, TypeVar, Union import numpy as np", "``lambda x: np.float64(x)``. See Also -------- asrandvar : Transform into", "descendants. Parameters ---------- shape : Shape of realizations of this", "# Make immutable if isinstance(var, np.ndarray): var.setflags(write=False) return var @cached_property", "random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: -self.sample(size=size), in_support=lambda x: self.in_support(-x), mode=lambda: -self.mode, median=lambda:", "._arithmetic import divmod_ return divmod_(self, other) def __rdivmod__(self, other: Any)", "Variables. This module implements random variables. Random variables are the", "shape=shape, dtype=dtype, random_state=random_state, parameters=parameters, sample=sample, in_support=in_support, cdf=cdf, logcdf=logcdf, quantile=quantile, mode=mode,", "Log-cumulative distribution function. Parameters ---------- x : array-like Evaluation points", "bool = False ) -> Union[np.float_, np.ndarray]: if np.isscalar(value): if", "a consequence, altering the internal state of a :class:`RandomVariable` (e.g.", "= np.exp(self.__logpmf(x)) assert isinstance(pmf, np.float_) return pmf else: raise NotImplementedError(", "isinstance(pmf, np.float_) return pmf else: raise NotImplementedError( f\"Neither the `pmf`", "the random variable. \"\"\" # pylint: disable=line-too-long if self.__cov is", "assert isinstance(logpmf, np.float_) return logpmf else: raise NotImplementedError( f\"Neither the", "is not None: logpdf = np.log(self.__pdf(self._as_value_type(x))) assert isinstance(logpdf, np.float_) return", "Types self.__dtype = np.dtype(dtype) self.__median_dtype = RandomVariable.infer_median_dtype(self.__dtype) self.__moment_dtype = RandomVariable.infer_moment_dtype(self.__dtype)", "self.__shape: raise ValueError( f\"The quantile function should return values of", "def median_dtype(self) -> np.dtype: \"\"\"The dtype of the :attr:`median`. 
    @staticmethod
    def infer_median_dtype(value_dtype: DTypeArgType) -> np.dtype:
        return RandomVariable.infer_moment_dtype(value_dtype)

    @staticmethod
    def infer_moment_dtype(value_dtype: DTypeArgType) -> np.dtype:
        return np.promote_types(value_dtype, np.float_)

    def _as_value_type(self, x: Any) -> _ValueType:
        if self.__as_value_type is not None:
            return self.__as_value_type(x)

        return x

    @staticmethod
    def _check_property_value(
        name: str,
        value: Any,
        shape: Optional[Tuple[int, ...]] = None,
        dtype: Optional[np.dtype] = None,
    ):
        if shape is not None:
            if value.shape != shape:
                raise ValueError(
                    f"The {name} of the random variable does not have the correct "
                    f"shape. Expected {shape} but got {value.shape}."
                )

        if dtype is not None:
            if not np.issubdtype(value.dtype, dtype):
                raise ValueError(
                    f"The {name} of the random variable does not have the correct "
                    f"dtype. Expected {dtype.name} but got {value.dtype.name}."
                )

    @classmethod
    def _ensure_numpy_float(
        cls, name: str, value: Any, force_scalar: bool = False
    ) -> Union[np.float_, np.ndarray]:
        if np.isscalar(value):
            if not isinstance(value, np.float_):
                try:
                    value = _utils.as_numpy_scalar(value, dtype=np.float_)
                except TypeError as err:
                    raise TypeError(
                        f"The function `{name}` specified via the constructor of "
                        f"`{cls.__name__}` must return a scalar value that can be "
                        f"converted to a `np.float_`, which is not possible for "
                        f"{value} of type {type(value)}."
                    ) from err
        elif not force_scalar:
            try:
                value = np.asarray(value, dtype=np.float_)
            except TypeError as err:
                raise TypeError(
                    f"The function `{name}` specified via the constructor of "
                    f"`{cls.__name__}` must return a value that can be converted "
                    f"to a `np.ndarray` of type `np.float_`, which is not possible "
                    f"for {value} of type {type(value)}."
                ) from err
        else:
            raise TypeError(
                f"The function `{name}` specified via the constructor of "
                f"`{cls.__name__}` must return a scalar value, but {value} of type "
                f"{type(value)} is not scalar."
            )

        assert isinstance(value, (np.float_, np.ndarray))

        return value
\"\"\" if", "def logpdf(self, x: _ValueType) -> np.float_: \"\"\" Natural logarithm of", "the returned quantiles will also be integers. This means that,", "which is not possible for \" f\"{value} of type {type(value)}.\"", "encoding the prior distribution as input and outputs a random", "mean = self.__mean() RandomVariable._check_property_value( \"mean\", mean, shape=self.__shape, dtype=self.__moment_dtype, ) #", "transposed random variable. \"\"\" return RandomVariable( shape=np.empty(shape=self.shape).transpose(*axes).shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda", "-self.mode, median=lambda: -self.median, mean=lambda: -self.mean, cov=lambda: self.cov, var=lambda: self.var, std=lambda:", "random_state(self) -> RandomStateType: \"\"\"Random state of the random variable. This", "of the random variable. The cdf evaluation will be broadcast", "`bool`, but its return value \" f\"is of type `{type(x)}`.\"", "self.__logcdf is not None: return RandomVariable._ensure_numpy_float( \"logcdf\", self.__logcdf(self._as_value_type(x)) ) elif", "mod(self, other) def __rmod__(self, other: Any) -> \"RandomVariable\": # pylint:", "Optional[Callable[[_ValueType], np.float_]] = None, quantile: Optional[Callable[[FloatArgType], _ValueType]] = None, mode:", "is implemented.\" ) def quantile(self, p: FloatArgType) -> _ValueType: \"\"\"Quantile", "by the fact that, even for discrete random variables, e.g.", "If None (or np.random), the global np.random state is used.", "def pdf(self, x: _ValueType) -> np.float_: \"\"\" Probability density or", "NotImplementedError( f\"Neither the `logpdf` nor the `pdf` of the continuous", "reshape(self, newshape: ShapeArgType) -> \"RandomVariable\": \"\"\" Give a new shape", "std @cached_property def entropy(self) -> np.float_: if self.__entropy is None:", "mean : array-like The mean of the distribution. \"\"\" if", "ints New shape for the random variable. It must be", "------- p : array-like Value of the probability density /", "ValueError( f\"The {name} of the random variable does not have", "probability density function. Parameters ---------- x : array-like Evaluation points", "raise NotImplementedError in_support = self.__in_support(self._as_value_type(x)) if not isinstance(in_support, bool): raise", "the covariance, see :attr:`moment_dtype`. Returns ------- cov : array-like The", "-> _ValueType: \"\"\" Variance :math:`\\\\operatorname{Var}(X) = \\\\mathbb{E}((X-\\\\mathbb{E}(X))^2)` of the distribution.", "points of the cumulative distribution function. The shape of this", "the `logpdf` nor the `pdf` of the continuous random variable", "Mean :math:`\\\\mathbb{E}(X)` of the distribution. To learn about the dtype", "a random variable. 
Parameters ---------- newshape : int or tuple", ") @classmethod def _ensure_numpy_float( cls, name: str, value: Any, force_scalar:", "Probability mass function self.__pmf = pmf self.__logpmf = logpmf super().__init__(", "TypeError as err: raise TypeError( f\"The function `{name}` specified via", "RandomVariable.infer_moment_dtype(value_dtype) @staticmethod def infer_moment_dtype(value_dtype: DTypeArgType) -> np.dtype: return np.promote_types(value_dtype, np.float_)", "abs(self.sample(size=size)), ) # Binary arithmetic operations __array_ufunc__ = None \"\"\"", "converted \" f\"to a `np.ndarray` of type `np.float_`, which is", "The internals of :class:`RandomVariable` objects are assumed to be constant", "Optional[Callable[[_ValueType], np.float_]] = None, logcdf: Optional[Callable[[_ValueType], np.float_]] = None, quantile:", "of elementwise. Thus no array of RandomVariables but a RandomVariable", "the random variable self.__mode = mode self.__median = median self.__mean", "exc: raise TypeError( \"The given argument `p` can not be", "disable=import-outside-toplevel,cyclic-import from ._arithmetic import truediv return truediv(self, other) def __rtruediv__(self,", "= None, logcdf: Optional[Callable[[_ValueType], np.float_]] = None, quantile: Optional[Callable[[FloatArgType], _ValueType]]", "logcdf else: raise NotImplementedError( f\"Neither the `logcdf` nor the `cdf`", "raise NotImplementedError( f\"Neither the `pmf` nor the `logpmf` of the", "instance seeded with seed. \"\"\" self._random_state = _utils.as_random_state(seed) @property def", "not possible \" f\"for {value} of type {type(value)}.\" ) from", "__rtruediv__(self, other: Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic", "variable encoding the prior distribution as input and outputs a", "calling elementwise arithmetic operations allowing expressions like: y = np.array([1,", "as :math:`Q(p) = \\\\inf\\\\{ x \\\\in \\\\mathbb{R} \\\\colon p \\\\le", "of type \" f\"{type(value)} is not scalar.\" ) assert isinstance(value,", "random variable. To learn about the dtype of the median,", "function, etc.) will result in undefined behavior. In particular, this", "return add(other, self) def __sub__(self, other: Any) -> \"RandomVariable\": #", "instance, this method is useful if (``log``)``cdf`` and (``log``)``pdf`` both", "user to be able to pass Python :class:`float`. Then ``as_value_type``", "__abs__(self) -> \"RandomVariable\": return RandomVariable( shape=self.shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size:", "return values of the same shape as the \" f\"random", "is None: try: var = np.diag(self.cov).reshape(self.__shape).copy() except NotImplementedError as exc:", "type {type(value)}.\" ) from err else: raise TypeError( f\"The function", "the :attr:`shape` of the random variable. 
The logcdf evaluation will", ") median = self.__median() RandomVariable._check_property_value( \"median\", median, shape=self.__shape, dtype=self.__median_dtype, )", "shape of this argument should be :code:`(..., S1, ..., SN)`,", "dtype: DTypeArgType, random_state: RandomStateArgType = None, parameters: Optional[Dict[str, Any]] =", "interpreted as realizations of this random variable, to an easy-to-process,", "raise NotImplementedError mean = self.__mean() RandomVariable._check_property_value( \"mean\", mean, shape=self.__shape, dtype=self.__moment_dtype,", "is not None: logpmf = np.log(self.__pmf(self._as_value_type(x))) assert isinstance(logpmf, np.float_) return", "from ._arithmetic import floordiv return floordiv(other, self) def __mod__(self, other:", ": array-like Evaluation points of the log-probability density/mass function. The", "var self.__std = std self.__entropy = entropy # Utilities self.__as_value_type", "the random variable does not have the correct \" f\"shape.", "random variables, the :attr:`median` might lie in between two values", "is motivated by the fact that, even for discrete random", "if self.__logpmf is not None: return DiscreteRandomVariable._ensure_numpy_float( \"logpmf\", self.__logpmf(self._as_value_type(x)) )", "Parameters ---------- x : array-like Evaluation points of the log-probability", "operations __array_ufunc__ = None \"\"\" This prevents numpy from calling", "and outputs of probabilistic numerical methods. \"\"\" from typing import", "converted to ``numpy.dtype``. as_value_type : Function which can be used", "self.__logpdf is not None: pdf = np.exp(self.__logpdf(self._as_value_type(x))) assert isinstance(pdf, np.float_)", "_utils.as_shape(newshape) return RandomVariable( shape=newshape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: self.sample(size).reshape(size +", "shape=self.__shape, dtype=self.__dtype, ) # Make immutable if isinstance(mode, np.ndarray): mode.setflags(write=False)", "# pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import add return add(self, other)", "discrete distribution over the integers, the returned quantiles will also", "int) -> \"RandomVariable\": \"\"\" Transpose the random variable. Parameters ----------", "mean=lambda: self.mean.reshape(newshape), cov=lambda: self.cov, var=lambda: self.var.reshape(newshape), std=lambda: self.std.reshape(newshape), entropy=lambda: self.entropy,", "are assumed to be constant over their whole lifecycle. This", "self.__dtype @property def median_dtype(self) -> np.dtype: \"\"\"The dtype of the", "divmod_ return divmod_(other, self) def __pow__(self, other: Any) -> \"RandomVariable\":", "``size`` and the inherent ``shape``. \"\"\" if self.__sample is None:", "np.exp(self.logcdf(self._as_value_type(x))) assert isinstance(cdf, np.float_) return cdf else: raise NotImplementedError( f\"Neither", "std=lambda: self.std.transpose(*axes), entropy=lambda: self.entropy, as_value_type=self.__as_value_type, ) T = property(transpose) #", "-> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import divmod_ return", "_utils from probnum.type import ( ArrayLikeGetitemArgType, DTypeArgType, FloatArgType, RandomStateArgType, RandomStateType,", "-> _ValueType: \"\"\" Mode of the random variable. Returns -------", "specified via the constructor of \" f\"`{cls.__name__}` must return a", "array-like Evaluation points of the cumulative distribution function. 
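
        A quick numeric check of the promotion rule (``np.promote_types`` is the
        mechanism used by :meth:`infer_moment_dtype` below):

        >>> import numpy as np
        >>> np.promote_types(np.dtype(np.int32), np.float_)
        dtype('float64')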
        """
        return self.__median_dtype

    @property
    def moment_dtype(self) -> np.dtype:
        """The dtype of any (function of a) moment of the random variable, e.g. its
        :attr:`mean`, :attr:`cov`, :attr:`var`, or :attr:`std`. It will be set to
        the dtype arising from the multiplication of values with dtypes
        :attr:`dtype` and :class:`np.float_`. This is motivated by the mathematical
        definition of a moment as a sum or an integral over products of
        probabilities and values of the random variable, which are represented
        using the dtypes :class:`np.float_` and :attr:`dtype`, respectively.
        """
        return self.__moment_dtype

    @property
    def random_state(self) -> RandomStateType:
        """Random state of the random variable.

        This attribute defines the RandomState object to use for drawing
        realizations from this random variable. If None (or np.random), the global
        np.random state is used. If integer, it is used to seed the local
        :class:`~numpy.random.RandomState` instance.
        """
        return self._random_state

    @random_state.setter
    def random_state(self, seed: RandomStateArgType):
        """Get or set the RandomState object of the underlying distribution.

        This can be either None or an existing RandomState object. If None (or
        np.random), use the RandomState singleton used by np.random. If already a
        RandomState instance, use it. If an int, use a new RandomState instance
        seeded with seed.
        """
        self._random_state = _utils.as_random_state(seed)

    @property
    def parameters(self) -> Dict[str, Any]:
        """
        Parameters of the probability distribution.

        The parameters of the distribution such as mean, variance, et cetera stored
        in a ``dict``.
        """
        return self.__parameters.copy()

    @cached_property
    def mode(self) -> _ValueType:
        """
        Mode of the random variable.

        Returns
        -------
        mode : float
            The mode of the random variable.
        """
        if self.__mode is None:
            raise NotImplementedError

        mode = self.__mode()

        RandomVariable._check_property_value(
            "mode",
            mode,
            shape=self.__shape,
            dtype=self.__dtype,
        )

        # Make immutable
        if isinstance(mode, np.ndarray):
            mode.setflags(write=False)

        return mode

    @cached_property
    def median(self) -> _ValueType:
        """
        Median of the random variable.

        To learn about the dtype of the median, see :attr:`median_dtype`.

        Returns
        -------
        median : float
            The median of the distribution.
        """
        if self.__shape != ():
            raise NotImplementedError(
                "The median is only defined for scalar random variables."
            )

        median = self.__median()

        RandomVariable._check_property_value(
            "median",
            median,
            shape=self.__shape,
            dtype=self.__median_dtype,
        )

        # Make immutable
        if isinstance(median, np.ndarray):
            median.setflags(write=False)

        return median

    @cached_property
    def mean(self) -> _ValueType:
        """
        Mean :math:`\\mathbb{E}(X)` of the distribution.

        To learn about the dtype of the mean, see :attr:`moment_dtype`.

        Returns
        -------
        mean : array-like
            The mean of the distribution.
        """
        if self.__mean is None:
            raise NotImplementedError

        mean = self.__mean()

        RandomVariable._check_property_value(
            "mean",
            mean,
            shape=self.__shape,
            dtype=self.__moment_dtype,
        )

        # Make immutable
        if isinstance(mean, np.ndarray):
            mean.setflags(write=False)

        return mean

    @cached_property
    def cov(self) -> _ValueType:
        """
        Covariance :math:`\\operatorname{Cov}(X) = \\mathbb{E}((X-\\mathbb{E}(X))(X-\\mathbb{E}(X))^\\top)` of the random variable.

        To learn about the dtype of the covariance, see :attr:`moment_dtype`.

        Returns
        -------
        cov : array-like
            The covariance of the random variable.
        """  # pylint: disable=line-too-long
        if self.__cov is None:
            raise NotImplementedError

        cov = self.__cov()

        RandomVariable._check_property_value(
            "covariance",
            cov,
            shape=(self.size, self.size) if self.ndim > 0 else (),
            dtype=self.__moment_dtype,
        )

        # Make immutable
        if isinstance(cov, np.ndarray):
            cov.setflags(write=False)

        return cov

    @cached_property
    def var(self) -> _ValueType:
        """
        Variance :math:`\\operatorname{Var}(X) = \\mathbb{E}((X-\\mathbb{E}(X))^2)` of the distribution.

        To learn about the dtype of the variance, see :attr:`moment_dtype`.

        Returns
        -------
        var : array-like
            The variance of the distribution.
        """
        if self.__var is None:
            try:
                var = np.diag(self.cov).reshape(self.__shape).copy()
            except NotImplementedError as exc:
                raise NotImplementedError from exc
        else:
            var = self.__var()

        RandomVariable._check_property_value(
            "variance",
            var,
            shape=self.__shape,
            dtype=self.__moment_dtype,
        )

        # Make immutable
        if isinstance(var, np.ndarray):
            var.setflags(write=False)

        return var

    @cached_property
    def std(self) -> _ValueType:
        """
        Standard deviation of the distribution.

        To learn about the dtype of the standard deviation, see :attr:`moment_dtype`.

        Returns
        -------
        std : array-like
            The standard deviation of the distribution.
        """
        if self.__std is None:
            try:
                std = np.sqrt(self.var)
            except NotImplementedError as exc:
                raise NotImplementedError from exc
        else:
            std = self.__std()

        RandomVariable._check_property_value(
            "standard deviation",
            std,
            shape=self.__shape,
            dtype=self.__moment_dtype,
        )

        # Make immutable
        if isinstance(std, np.ndarray):
            std.setflags(write=False)

        return std

    @cached_property
    def entropy(self) -> np.float_:
        if self.__entropy is None:
            raise NotImplementedError

        entropy = self.__entropy()
        entropy = RandomVariable._ensure_numpy_float(
            "entropy", entropy, force_scalar=True
        )

        return entropy

    def in_support(self, x: _ValueType) -> bool:
        if self.__in_support is None:
            raise NotImplementedError

        in_support = self.__in_support(self._as_value_type(x))

        if not isinstance(in_support, bool):
            raise ValueError(
                f"The function `in_support` must return a `bool`, but its return "
                f"value is of type `{type(in_support)}`."
            )

        return in_support

    def sample(self, size: ShapeArgType = ()) -> _ValueType:
        """
        Draw realizations from a random variable.

        Parameters
        ----------
        size : tuple
            Size of the drawn sample of realizations.

        Returns
        -------
        sample : array-like
            Sample of realizations with the given ``size`` and the inherent
            ``shape``.
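
        Examples
        --------
        A minimal sketch; the standard-normal ``sample`` callable is a hypothetical
        stand-in, and the import path assumes the public
        ``probnum.random_variables`` namespace:

        >>> import numpy as np
        >>> from probnum.random_variables import RandomVariable
        >>> rng = np.random.RandomState(0)  # assumed seed, illustration only
        >>> rv = RandomVariable(
        ...     shape=(),
        ...     dtype=np.dtype(np.float_),
        ...     sample=lambda size: rng.standard_normal(size=size),
        ... )
        >>> rv.sample(size=(2,)).shape
        (2,)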
        """
        if self.__sample is None:
            raise NotImplementedError("No sampling method provided.")

        return self.__sample(size=_utils.as_shape(size))

    def cdf(self, x: _ValueType) -> np.float_:
        """
        Cumulative distribution function.

        Parameters
        ----------
        x : array-like
            Evaluation points of the cumulative distribution function.
            The shape of this argument should be :code:`(..., S1, ..., SN)`, where
            :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
            The cdf evaluation will be broadcast over all additional dimensions.

        Returns
        -------
        q : array-like
            Value of the cumulative density function at the given points.
        """
        if self.__cdf is not None:
            return RandomVariable._ensure_numpy_float(
                "cdf", self.__cdf(self._as_value_type(x))
            )
        elif self.__logcdf is not None:
            cdf = np.exp(self.logcdf(self._as_value_type(x)))

            assert isinstance(cdf, np.float_)

            return cdf
        else:
            raise NotImplementedError(
                f"Neither the `cdf` nor the `logcdf` of the random variable object "
                f"with type `{type(self).__name__}` is implemented."
            )

    def logcdf(self, x: _ValueType) -> np.float_:
        """
        Log-cumulative distribution function.

        Parameters
        ----------
        x : array-like
            Evaluation points of the cumulative distribution function.
            The shape of this argument should be :code:`(..., S1, ..., SN)`, where
            :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
            The logcdf evaluation will be broadcast over all additional dimensions.

        Returns
        -------
        q : array-like
            Value of the log-cumulative density function at the given points.
        """
        if self.__logcdf is not None:
            return RandomVariable._ensure_numpy_float(
                "logcdf", self.__logcdf(self._as_value_type(x))
            )
        elif self.__cdf is not None:
            logcdf = np.log(self.__cdf(self._as_value_type(x)))

            assert isinstance(logcdf, np.float_)

            return logcdf
        else:
            raise NotImplementedError(
                f"Neither the `logcdf` nor the `cdf` of the random variable object "
                f"with type `{type(self).__name__}` is implemented."
            )

    def quantile(self, p: FloatArgType) -> _ValueType:
        """Quantile function.

        The quantile function :math:`Q \\colon [0, 1] \\to \\mathbb{R}` of a random
        variable :math:`X` is defined as
        :math:`Q(p) = \\inf\\{ x \\in \\mathbb{R} \\colon p \\le F_X(x) \\}`, where
        :math:`F_X \\colon \\mathbb{R} \\to [0, 1]` is the :meth:`cdf` of the random
        variable. From the definition it follows that the quantile function always
        returns values of the same dtype as the random variable. For instance, for
        a discrete distribution over the integers, the returned quantiles will also
        be integers. This means that, in general, :math:`Q(0.5)` is not equal to
        the :attr:`median` as it is defined in this class. See
        https://en.wikipedia.org/wiki/Quantile_function for more details and
        examples.
        """
        if self.__shape != ():
            raise NotImplementedError(
                "The quantile function is only defined for scalar random variables."
            )

        if self.__quantile is None:
            raise NotImplementedError

        try:
            p = _utils.as_numpy_scalar(p, dtype=np.floating)
        except TypeError as exc:
            raise TypeError(
                "The given argument `p` can not be cast to a `np.floating` object."
            ) from exc

        quantile = self.__quantile(p)

        if quantile.shape != self.__shape:
            raise ValueError(
                f"The quantile function should return values of the same shape as "
                f"the random variable, i.e. {self.__shape}, but it returned a value "
                f"with {quantile.shape}."
            )

        if quantile.dtype != self.__dtype:
            raise ValueError(
                f"The quantile function should return values of the same dtype as "
                f"the random variable, i.e. `{self.__dtype.name}`, but it returned "
                f"a value with dtype `{quantile.dtype.name}`."
            )

        return quantile

    def __getitem__(self, key: ArrayLikeGetitemArgType) -> "RandomVariable":
        return RandomVariable(
            shape=np.empty(shape=self.shape)[key].shape,
            dtype=self.dtype,
            random_state=_utils.derive_random_seed(self.random_state),
            sample=lambda size: self.sample(size)[key],
            mode=lambda: self.mode[key],
            mean=lambda: self.mean[key],
            var=lambda: self.var[key],
            std=lambda: self.std[key],
            as_value_type=self.__as_value_type,
        )

    def reshape(self, newshape: ShapeArgType) -> "RandomVariable":
        """
        Give a new shape to a random variable.

        Parameters
        ----------
        newshape : int or tuple of ints
            New shape for the random variable. It must be compatible with the
            original shape.

        Returns
        -------
        reshaped_rv : ``self`` with the new dimensions of ``shape``.
        """
        newshape = _utils.as_shape(newshape)

        return RandomVariable(
            shape=newshape,
            dtype=self.dtype,
            random_state=_utils.derive_random_seed(self.random_state),
            sample=lambda size: self.sample(size).reshape(size + newshape),
            mode=lambda: self.mode.reshape(newshape),
            median=lambda: self.median.reshape(newshape),
            mean=lambda: self.mean.reshape(newshape),
            cov=lambda: self.cov,
            var=lambda: self.var.reshape(newshape),
            std=lambda: self.std.reshape(newshape),
            entropy=lambda: self.entropy,
            as_value_type=self.__as_value_type,
        )

    def transpose(self, *axes: int) -> "RandomVariable":
        """
        Transpose the random variable.

        Parameters
        ----------
        axes : None, tuple of ints, or n ints
            See documentation of numpy.ndarray.transpose.

        Returns
        -------
        transposed_rv : The transposed random variable.
        """
        return RandomVariable(
            shape=np.empty(shape=self.shape).transpose(*axes).shape,
            dtype=self.dtype,
            random_state=_utils.derive_random_seed(self.random_state),
            sample=lambda size: self.sample(size).transpose(*axes),
            mode=lambda: self.mode.transpose(*axes),
            median=lambda: self.median.transpose(*axes),
            mean=lambda: self.mean.transpose(*axes),
            cov=lambda: self.cov,
            var=lambda: self.var.transpose(*axes),
            std=lambda: self.std.transpose(*axes),
            entropy=lambda: self.entropy,
            as_value_type=self.__as_value_type,
        )

    T = property(transpose)

    # Unary arithmetic operations

    def __neg__(self) -> "RandomVariable":
        return RandomVariable(
            shape=self.shape,
            dtype=self.dtype,
            random_state=_utils.derive_random_seed(self.random_state),
            sample=lambda size: -self.sample(size=size),
            in_support=lambda x: self.in_support(-x),
            mode=lambda: -self.mode,
            median=lambda: -self.median,
            mean=lambda: -self.mean,
            cov=lambda: self.cov,
            var=lambda: self.var,
            std=lambda: self.std,
            as_value_type=self.__as_value_type,
        )

    def __pos__(self) -> "RandomVariable":
        return RandomVariable(
            shape=self.shape,
            dtype=self.dtype,
            random_state=_utils.derive_random_seed(self.random_state),
            sample=lambda size: +self.sample(size=size),
            in_support=lambda x: self.in_support(+x),
            mode=lambda: +self.mode,
            median=lambda: +self.median,
            mean=lambda: +self.mean,
            cov=lambda: self.cov,
            var=lambda: self.var,
            std=lambda: self.std,
            as_value_type=self.__as_value_type,
        )

    def __abs__(self) -> "RandomVariable":
        return RandomVariable(
            shape=self.shape,
            dtype=self.dtype,
            random_state=_utils.derive_random_seed(self.random_state),
            sample=lambda size: abs(self.sample(size=size)),
        )

    # Binary arithmetic operations

    __array_ufunc__ = None
    """
    This prevents numpy from calling elementwise arithmetic operations, allowing
    expressions like ``y = np.array([1, 1]) + RV`` to call the arithmetic
    operations defined by RandomVariable instead of elementwise. Thus no array of
    RandomVariables but a RandomVariable with the correct shape is returned.
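
    For instance (an illustrative sketch; ``rv`` stands for any random variable of
    shape ``(2,)``):

        y = np.array([1, 1]) + rv  # dispatches to ``rv.__radd__`` below and
                                   # returns a single RandomVariable, not an
                                   # ndarray of RandomVariables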
    """

    def __add__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import add

        return add(self, other)

    def __radd__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import add

        return add(other, self)

    def __sub__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import sub

        return sub(self, other)

    def __rsub__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import sub

        return sub(other, self)

    def __mul__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import mul

        return mul(self, other)

    def __rmul__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import mul

        return mul(other, self)

    def __matmul__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import matmul

        return matmul(self, other)

    def __rmatmul__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import matmul

        return matmul(other, self)

    def __truediv__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import truediv

        return truediv(self, other)

    def __rtruediv__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import truediv

        return truediv(other, self)

    def __floordiv__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import floordiv

        return floordiv(self, other)

    def __rfloordiv__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import floordiv

        return floordiv(other, self)

    def __mod__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import mod

        return mod(self, other)

    def __rmod__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import mod

        return mod(other, self)

    def __divmod__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import divmod_

        return divmod_(self, other)

    def __rdivmod__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import divmod_

        return divmod_(other, self)

    def __pow__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import pow_

        return pow_(self, other)

    def __rpow__(self, other: Any) -> "RandomVariable":
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ._arithmetic import pow_

        return pow_(other, self)

    @staticmethod
    def infer_median_dtype(value_dtype: DTypeArgType) -> np.dtype:
        return RandomVariable.infer_moment_dtype(value_dtype)

    @staticmethod
    def infer_moment_dtype(value_dtype: DTypeArgType) -> np.dtype:
        return np.promote_types(value_dtype, np.float_)

    def _as_value_type(self, x: Any) -> _ValueType:
        if self.__as_value_type is not None:
            return self.__as_value_type(x)

        return x

    @staticmethod
    def _check_property_value(
        name: str,
        value: Any,
        shape: Optional[Tuple[int, ...]] = None,
        dtype: Optional[np.dtype] = None,
    ):
        if shape is not None:
            if value.shape != shape:
                raise ValueError(
                    f"The {name} of the random variable does not have the correct "
                    f"shape. Expected {shape} but got {value.shape}."
                )

        if dtype is not None:
            if not np.issubdtype(value.dtype, dtype):
                raise ValueError(
                    f"The {name} of the random variable does not have the correct "
                    f"dtype. Expected {dtype.name} but got {value.dtype.name}."
                )

    @classmethod
    def _ensure_numpy_float(
        cls, name: str, value: Any, force_scalar: bool = False
    ) -> Union[np.float_, np.ndarray]:
        if np.isscalar(value):
            if not isinstance(value, np.float_):
                try:
                    value = _utils.as_numpy_scalar(value, dtype=np.float_)
                except TypeError as err:
                    raise TypeError(
                        f"The function `{name}` specified via the constructor of "
                        f"`{cls.__name__}` must return a scalar value that can be "
                        f"converted to a `np.float_`, which is not possible for "
                        f"{value} of type {type(value)}."
                    ) from err
        elif not force_scalar:
            try:
                value = np.asarray(value, dtype=np.float_)
            except TypeError as err:
                raise TypeError(
                    f"The function `{name}` specified via the constructor of "
                    f"`{cls.__name__}` must return a value that can be converted "
                    f"to a `np.ndarray` of type `np.float_`, which is not possible "
                    f"for {value} of type {type(value)}."
                ) from err
        else:
            raise TypeError(
                f"The function `{name}` specified via the constructor of "
                f"`{cls.__name__}` must return a scalar value, but {value} of type "
                f"{type(value)} is not scalar."
            )

        assert isinstance(value, (np.float_, np.ndarray))

        return value


class DiscreteRandomVariable(RandomVariable[_ValueType]):
    def __init__(
        self,
        shape: ShapeArgType,
        dtype: DTypeArgType,
        random_state: Optional[RandomStateType] = None,
        parameters: Optional[Dict[str, Any]] = None,
        sample: Optional[Callable[[ShapeArgType], _ValueType]] = None,
        in_support: Optional[Callable[[_ValueType], bool]] = None,
        pmf: Optional[Callable[[_ValueType], np.float_]] = None,
        logpmf: Optional[Callable[[_ValueType], np.float_]] = None,
        cdf: Optional[Callable[[_ValueType], np.float_]] = None,
        logcdf: Optional[Callable[[_ValueType], np.float_]] = None,
        quantile: Optional[Callable[[FloatArgType], _ValueType]] = None,
        mode: Optional[Callable[[], _ValueType]] = None,
        median: Optional[Callable[[], _ValueType]] = None,
        mean: Optional[Callable[[], _ValueType]] = None,
        cov: Optional[Callable[[], _ValueType]] = None,
        var: Optional[Callable[[], _ValueType]] = None,
        std: Optional[Callable[[], _ValueType]] = None,
        entropy: Optional[Callable[[], np.float_]] = None,
    ):
        # Probability mass function
        self.__pmf = pmf
        self.__logpmf = logpmf

        super().__init__(
            shape=shape, dtype=dtype, random_state=random_state,
            parameters=parameters, sample=sample, in_support=in_support,
            cdf=cdf, logcdf=logcdf, quantile=quantile, mode=mode, median=median,
            mean=mean, cov=cov, var=var, std=std, entropy=entropy,
        )

    def pmf(self, x: _ValueType) -> np.float_:
        if self.__pmf is not None:
            return DiscreteRandomVariable._ensure_numpy_float(
                "pmf", self.__pmf(self._as_value_type(x))
            )
        elif self.__logpmf is not None:
            pmf = np.exp(self.__logpmf(self._as_value_type(x)))

            assert isinstance(pmf, np.float_)

            return pmf
        else:
            raise NotImplementedError(
                f"Neither the `pmf` nor the `logpmf` of the discrete random "
                f"variable object with type `{type(self).__name__}` is implemented."
            )

    def logpmf(self, x: _ValueType) -> np.float_:
        if self.__logpmf is not None:
            return DiscreteRandomVariable._ensure_numpy_float(
                "logpmf", self.__logpmf(self._as_value_type(x))
            )
        elif self.__pmf is not None:
            logpmf = np.log(self.__pmf(self._as_value_type(x)))

            assert isinstance(logpmf, np.float_)

            return logpmf
        else:
            raise NotImplementedError(
                f"Neither the `logpmf` nor the `pmf` of the discrete random "
                f"variable object with type `{type(self).__name__}` is implemented."
            )


class ContinuousRandomVariable(RandomVariable[_ValueType]):
    def __init__(
        self,
        shape: ShapeArgType,
        dtype: DTypeArgType,
        random_state: Optional[RandomStateType] = None,
        parameters: Optional[Dict[str, Any]] = None,
        sample: Optional[Callable[[ShapeArgType], _ValueType]] = None,
        in_support: Optional[Callable[[_ValueType], bool]] = None,
        pdf: Optional[Callable[[_ValueType], np.float_]] = None,
        logpdf: Optional[Callable[[_ValueType], np.float_]] = None,
        cdf: Optional[Callable[[_ValueType], np.float_]] = None,
        logcdf: Optional[Callable[[_ValueType], np.float_]] = None,
        quantile: Optional[Callable[[FloatArgType], _ValueType]] = None,
        mode: Optional[Callable[[], _ValueType]] = None,
        median: Optional[Callable[[], _ValueType]] = None,
        mean: Optional[Callable[[], _ValueType]] = None,
        cov: Optional[Callable[[], _ValueType]] = None,
        var: Optional[Callable[[], _ValueType]] = None,
        std: Optional[Callable[[], _ValueType]] = None,
        entropy: Optional[Callable[[], np.float_]] = None,
    ):
        # Probability density function
        self.__pdf = pdf
        self.__logpdf = logpdf

        super().__init__(
            shape=shape, dtype=dtype, random_state=random_state,
            parameters=parameters, sample=sample, in_support=in_support,
            cdf=cdf, logcdf=logcdf, quantile=quantile, mode=mode, median=median,
            mean=mean, cov=cov, var=var, std=std, entropy=entropy,
        )

    def pdf(self, x: _ValueType) -> np.float_:
        """
        Probability density or mass function.

        Following the predominant convention in mathematics, we express pdfs with
        respect to the Lebesgue measure unless stated otherwise.

        Parameters
        ----------
        x : array-like
            Evaluation points of the probability density / mass function.
            The shape of this argument should be :code:`(..., S1, ..., SN)`, where
            :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
            The pdf evaluation will be broadcast over all additional dimensions.

        Returns
        -------
        p : array-like
            Value of the probability density / mass function at the given points.
For instance, for a", ":attr:`mean`, :attr:`cov`, :attr:`var`, or :attr:`std`. It will be set to", "This is due to the caches used to make certain", "use for drawing realizations from this random variable. If None", "disable=import-outside-toplevel,cyclic-import from ._arithmetic import mod return mod(other, self) def __divmod__(self,", "= None, logpdf: Optional[Callable[[_ValueType], np.float_]] = None, cdf: Optional[Callable[[_ValueType], np.float_]]", "None or an existing RandomState object. If None (or np.random),", "self.__pmf = pmf self.__logpmf = logpmf super().__init__( shape=shape, dtype=dtype, random_state=random_state,", "function is only defined for scalar random variables.\" ) if", "the mean, see :attr:`moment_dtype`. Returns ------- mean : array-like The", "sample=lambda size: -self.sample(size=size), in_support=lambda x: self.in_support(-x), mode=lambda: -self.mode, median=lambda: -self.median,", "entropy=lambda: self.entropy, as_value_type=self.__as_value_type, ) def transpose(self, *axes: int) -> \"RandomVariable\":", "``pmf`` and ``logpmf`` (in :class:`DiscreteRandomVariable`), ``pdf`` and ``logpdf`` (in :class:`ContinuousRandomVariable`),", "None, sample: Optional[Callable[[ShapeType], _ValueType]] = None, in_support: Optional[Callable[[_ValueType], bool]] =", "be set to the dtype arising from the multiplication of", "their whole lifecycle. This is due to the caches used", "for drawing realizations from this random variable. If None (or", "NotImplementedError mode = self.__mode() RandomVariable._check_property_value( \"mode\", mode, shape=self.__shape, dtype=self.__dtype, )", "array-like Value of the log-probability density / mass function at", "self.__mode is None: raise NotImplementedError mode = self.__mode() RandomVariable._check_property_value( \"mode\",", "import truediv return truediv(self, other) def __rtruediv__(self, other: Any) ->", "discrete random variables, e.g. integer-valued random variables, the :attr:`median` might", "self.__moment_dtype = RandomVariable.infer_moment_dtype(self.__dtype) self._random_state = _utils.as_random_state(random_state) # Probability distribution of", "can not be cast to a `np.floating` object.\" ) from", "from ._arithmetic import truediv return truediv(self, other) def __rtruediv__(self, other:", "from ._arithmetic import sub return sub(other, self) def __mul__(self, other:", "variable does not have the correct \" f\"dtype. Expected {dtype.name}", "should return values of the same shape as the \"", "useful if (``log``)``cdf`` and (``log``)``pdf`` both only work on :class:`np.float_`", ">=3.8 from functools import cached_property except ImportError: from cached_property import", ":attr:`dtype`, respectively. \"\"\" return self.__moment_dtype @property def random_state(self) -> RandomStateType:", "of values with dtypes :attr:`dtype` and :class:`np.float_`. 
This is motivated", "= None, mean: Optional[Callable[[], _ValueType]] = None, cov: Optional[Callable[[], _ValueType]]", "= _utils.as_shape(shape) # Data Types self.__dtype = np.dtype(dtype) self.__median_dtype =", "np.float_]] = None, logpmf: Optional[Callable[[_ValueType], np.float_]] = None, cdf: Optional[Callable[[_ValueType],", "._arithmetic import truediv return truediv(other, self) def __floordiv__(self, other: Any)", "not None: return RandomVariable._ensure_numpy_float( \"cdf\", self.__cdf(self._as_value_type(x)) ) elif self.__logcdf is", "as_value_type=self.__as_value_type, ) def transpose(self, *axes: int) -> \"RandomVariable\": \"\"\" Transpose", "is only available in Python >=3.8 from functools import cached_property", "_ValueType]] = None, ): # pylint: disable=too-many-arguments,too-many-locals \"\"\"Create a new", ") elif self.__cdf is not None: logcdf = np.log(self.__cdf(x)) assert", "i.e. `{self.__dtype.name}`, but it returned a value \" f\"with dtype", "median=median, mean=mean, cov=cov, var=var, std=std, entropy=entropy, ) def pdf(self, x:", "local :class:`~numpy.random.RandomState` instance. \"\"\" return self._random_state @random_state.setter def random_state(self, seed:", "the :attr:`median` might lie in between two values in which", "pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import pow_ return pow_(self, other) def", "cov=lambda: self.cov, var=lambda: self.var, std=lambda: self.std, as_value_type=self.__as_value_type, ) def __abs__(self)", "the prior distribution as input and outputs a random variable", "parameters is not None else {} self.__sample = sample self.__in_support", "self.sample(size).transpose(*axes), mode=lambda: self.mode.transpose(*axes), median=lambda: self.median.transpose(*axes), mean=lambda: self.mean.transpose(*axes), cov=lambda: self.cov, var=lambda:", "None, in_support: Optional[Callable[[_ValueType], bool]] = None, pmf: Optional[Callable[[_ValueType], np.float_]] =", "if self.__pmf is not None: return DiscreteRandomVariable._ensure_numpy_float(\"pmf\", self.__pmf(x)) elif self.__logpmf", "------- mean : array-like The mean of the distribution. \"\"\"", "broadcast over all additional dimensions. Returns ------- q : array-like", "defines the RandomState object to use for drawing realizations from", "\"\"\" return self.__parameters.copy() @cached_property def mode(self) -> _ValueType: \"\"\" Mode", "return DiscreteRandomVariable._ensure_numpy_float( \"logpmf\", self.__logpmf(self._as_value_type(x)) ) elif self.__pmf is not None:", "function. The quantile function :math:`Q \\\\colon [0, 1] \\\\to \\\\mathbb{R}`", "variable. This attribute defines the RandomState object to use for", "realizations of this random variable, to an easy-to-process, normalized format.", "\" f\"is of type `{type(x)}`.\" ) return in_support def sample(self,", "``logcdf``, ``pmf`` and ``logpmf`` (in :class:`DiscreteRandomVariable`), ``pdf`` and ``logpdf`` (in", "property(transpose) # Unary arithmetic operations def __neg__(self) -> \"RandomVariable\": return", "Optional[Callable[[], np.float_]] = None, ): # Probability density function self.__pdf", "try: # functools.cached_property is only available in Python >=3.8 from", "whole lifecycle. This is due to the caches used to", "realizations. 
Returns ------- sample : array-like Sample of realizations with", "!= self.__dtype: raise ValueError( f\"The quantile function should return values", "__mod__(self, other: Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic", "and ``logpdf`` (in :class:`ContinuousRandomVariable`), and potentially by similar functions in", ":attr:`var`, or :attr:`std`. It will be set to the dtype", "the dtype of the variance, see :attr:`moment_dtype`. Returns ------- var", "# Unary arithmetic operations def __neg__(self) -> \"RandomVariable\": return RandomVariable(", "None: raise NotImplementedError in_support = self.__in_support(self._as_value_type(x)) if not isinstance(in_support, bool):", "from ._arithmetic import mul return mul(self, other) def __rmul__(self, other:", "_ensure_numpy_float( cls, name: str, value: Any, force_scalar: bool = False", "self.__pmf is not None: return DiscreteRandomVariable._ensure_numpy_float(\"pmf\", self.__pmf(x)) elif self.__logpmf is", "dtype as the \" f\"random variable, i.e. `{self.__dtype.name}`, but it", "a value with \" f\"{quantile.shape}.\" ) if quantile.dtype != self.__dtype:", ": int or tuple of ints New shape for the", "pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import add return add(self, other) def", "f\"random variable, i.e. `{self.__dtype.name}`, but it returned a value \"", "variable, e.g. its :attr:`mean`, :attr:`cov`, :attr:`var`, or :attr:`std`. It will", "with type `{type(self).__name__}` is implemented.\" ) def logpmf(self, x: _ValueType)", "distribution encodes the uncertainty arising from finite computation. The generic", "_ValueType) -> np.float_: \"\"\" Log-cumulative distribution function. Parameters ---------- x", "from probnum.type import ( ArrayLikeGetitemArgType, DTypeArgType, FloatArgType, RandomStateArgType, RandomStateType, ShapeArgType,", "std self.__entropy = entropy # Utilities self.__as_value_type = as_value_type def", "method takes a random variable encoding the prior distribution as", "self.__pmf(x)) elif self.__logpmf is not None: pmf = np.exp(self.__logpmf(x)) assert", "available in Python >=3.8 from functools import cached_property except ImportError:", "f\"The function `in_support` must return a `bool`, but its return", "The transposed random variable. \"\"\" return RandomVariable( shape=np.empty(shape=self.shape).transpose(*axes).shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state),", "of the random variable. Returns ------- mode : float The", "variable, i.e. `{self.__dtype.name}`, but it returned a value \" f\"with", "undefined behavior. In particular, this should be kept in mind", "documentation of numpy.ndarray.transpose. Returns ------- transposed_rv : The transposed random", "log-probability density/mass function. 
The shape of this argument should be", "_utils.as_random_state(random_state) # Probability distribution of the random variable self.__parameters =", "given argument `p` can not be cast to a `np.floating`", "Optional[Callable[[ShapeArgType], _ValueType]] = None, in_support: Optional[Callable[[_ValueType], bool]] = None, pmf:", "NotImplementedError(\"No sampling method provided.\") return self.__sample(size=_utils.as_shape(size)) def cdf(self, x: _ValueType)", "logpmf: Optional[Callable[[_ValueType], np.float_]] = None, cdf: Optional[Callable[[_ValueType], np.float_]] = None,", "var=lambda: self.var, std=lambda: self.std, as_value_type=self.__as_value_type, ) def __pos__(self) -> \"RandomVariable\":", "random variable self.__parameters = parameters.copy() if parameters is not None", "cov self.__var = var self.__std = std self.__entropy = entropy", "f\"to a `np.ndarray` of type `np.float_`, which is not possible", "where :code:`(S1, ..., SN)` is the :attr:`shape` of the random", "seeded with seed. \"\"\" self._random_state = _utils.as_random_state(seed) @property def parameters(self)", "ContinuousRandomVariable._ensure_numpy_float( \"logpdf\", self.__logpdf(self._as_value_type(x)) ) elif self.__pdf is not None: logpdf", "class RandomVariable(Generic[_ValueType]): \"\"\" Random variables are the main objects used", "std=lambda: self.std.reshape(newshape), entropy=lambda: self.entropy, as_value_type=self.__as_value_type, ) def transpose(self, *axes: int)", "-> _ValueType: \"\"\" Draw realizations from a random variable. Parameters", "return mean @cached_property def cov(self) -> _ValueType: \"\"\" Covariance :math:`\\\\operatorname{Cov}(X)", "the random variable. To learn about the dtype of the", "see :attr:`moment_dtype`. Returns ------- cov : array-like The kernels of", "`np.floating` object.\" ) from exc quantile = self.__quantile(p) if quantile.shape", "the RandomState singleton used by np.random. If already a RandomState", "raise NotImplementedError( f\"Neither the `logpdf` nor the `pdf` of the", "in_support def sample(self, size: ShapeArgType = ()) -> _ValueType: \"\"\"", "its :attr:`mean`, :attr:`cov`, :attr:`var`, or :attr:`std`. It will be set", "if quantile.shape != self.__shape: raise ValueError( f\"The quantile function should", "= False ) -> Union[np.float_, np.ndarray]: if np.isscalar(value): if not", "pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import sub return sub(other, self) def", "= \\\\mathbb{E}((X-\\\\mathbb{E}(X))(X-\\\\mathbb{E}(X))^\\\\top)` of the random variable. To learn about the", "implemented.\" ) def logcdf(self, x: _ValueType) -> np.float_: \"\"\" Log-cumulative", "motivated by the mathematical definition of a moment as a", "like ``in_support``, ``cdf`` and ``logcdf``, ``pmf`` and ``logpmf`` (in :class:`DiscreteRandomVariable`),", "= self.__entropy() entropy = RandomVariable._ensure_numpy_float( \"entropy\", entropy, force_scalar=True ) return", "transpose(self, *axes: int) -> \"RandomVariable\": \"\"\" Transpose the random variable.", "this class. See https://en.wikipedia.org/wiki/Quantile_function for more details and examples. \"\"\"", "pmf else: raise NotImplementedError( f\"Neither the `pmf` nor the `logpmf`", ") def pmf(self, x: _ValueType) -> np.float_: if self.__pmf is", "arrays and linear operators. This may change their ``distribution`` and", "# pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import floordiv return floordiv(other, self)", "Parameters of the probability distribution. 
The parameters of the distribution", "of the :attr:`median`. It will be set to the dtype", "_ValueType]] = None, in_support: Optional[Callable[[_ValueType], bool]] = None, pdf: Optional[Callable[[_ValueType],", "sample self.__in_support = in_support self.__cdf = cdf self.__logcdf = logcdf", "as exc: raise NotImplementedError from exc else: var = self.__var()", "x: _ValueType) -> np.float_: if self.__pmf is not None: return", "variable \" f\"object with type `{type(self).__name__}` is implemented.\" ) def", "must be compatible with the original shape. Returns ------- reshaped_rv", "self) def __matmul__(self, other: Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import", "mean(self) -> _ValueType: \"\"\" Mean :math:`\\\\mathbb{E}(X)` of the distribution. To", "@property def moment_dtype(self) -> np.dtype: \"\"\"The dtype of any (function", "or any of its descendants. Parameters ---------- shape : Shape", "f\"for {value} of type {type(value)}.\" ) from err else: raise", "{shape} but got {value.shape}.\" ) if dtype is not None:", "the constructor of \" f\"`{cls.__name__}` must return a value that", "a new random variable.\"\"\" self.__shape = _utils.as_shape(shape) # Data Types", "sample=lambda size: self.sample(size).transpose(*axes), mode=lambda: self.mode.transpose(*axes), median=lambda: self.median.transpose(*axes), mean=lambda: self.mean.transpose(*axes), cov=lambda:", "def __rfloordiv__(self, other: Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from", "of the continuous random variable \" f\"object with type `{type(self).__name__}`", "np.ndarray): median.setflags(write=False) return median @cached_property def mean(self) -> _ValueType: \"\"\"", "return a scalar value that can be \" f\"converted to", "with seed. \"\"\" self._random_state = _utils.as_random_state(seed) @property def parameters(self) ->", "self.mean.reshape(newshape), cov=lambda: self.cov, var=lambda: self.var.reshape(newshape), std=lambda: self.std.reshape(newshape), entropy=lambda: self.entropy, as_value_type=self.__as_value_type,", "of numpy.ndarray.transpose. Returns ------- transposed_rv : The transposed random variable.", "dtype=self.__moment_dtype, ) # Make immutable if isinstance(cov, np.ndarray): cov.setflags(write=False) return", "except ImportError: from cached_property import cached_property _ValueType = TypeVar(\"ValueType\") class", "return var @cached_property def std(self) -> _ValueType: \"\"\" Standard deviation", "distribution of the random variable self.__parameters = parameters.copy() if parameters", "the dtype of the covariance, see :attr:`moment_dtype`. Returns ------- cov", "np.dtype: \"\"\"Data type of (elements of) a realization of this", "dtype of the :attr:`median`. It will be set to the", "Optional[Callable[[], np.float_]] = None, ): # Probability mass function self.__pmf", "= None, median: Optional[Callable[[], _ValueType]] = None, mean: Optional[Callable[[], _ValueType]]", "work on :class:`np.float_` arguments, but we still want the user", "between two values in which case these values are averaged.", "Generic, Optional, Tuple, TypeVar, Union import numpy as np from", "q : array-like Value of the cumulative density function at", "disable=import-outside-toplevel,cyclic-import from ._arithmetic import pow_ return pow_(self, other) def __rpow__(self,", "False ) -> Union[np.float_, np.ndarray]: if np.isscalar(value): if not isinstance(value,", "broadcast over all additional dimensions. 
Returns ------- p : array-like", "If ``object`` will be converted to ``numpy.dtype``. as_value_type : Function", ":attr:`median` might lie in between two values in which case", "mean=mean, cov=cov, var=var, std=std, entropy=entropy, ) def pdf(self, x: _ValueType)", "deviation\", std, shape=self.__shape, dtype=self.__moment_dtype, ) # Make immutable if isinstance(std,", "None else {} self.__sample = sample self.__in_support = in_support self.__cdf", "deviation, see :attr:`moment_dtype`. Returns ------- std : array-like The standard", "def __floordiv__(self, other: Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from", "._arithmetic import mod return mod(self, other) def __rmod__(self, other: Any)", "x: _ValueType) -> np.float_: \"\"\" Probability density or mass function.", "var(self) -> _ValueType: \"\"\" Variance :math:`\\\\operatorname{Var}(X) = \\\\mathbb{E}((X-\\\\mathbb{E}(X))^2)` of the", "dtype of the median, see :attr:`median_dtype`. Returns ------- median :", "super().__init__( shape=shape, dtype=dtype, random_state=random_state, parameters=parameters, sample=sample, in_support=in_support, cdf=cdf, logcdf=logcdf, quantile=quantile,", "self.__pdf(self._as_value_type(x)) ) if self.__logpdf is not None: pdf = np.exp(self.__logpdf(self._as_value_type(x)))", "def __divmod__(self, other: Any) -> \"RandomVariable\": # pylint: disable=import-outside-toplevel,cyclic-import from", "be cast to a `np.floating` object.\" ) from exc quantile", "Value of the probability density / mass function at the", "retained. The internals of :class:`RandomVariable` objects are assumed to be", "operations def __neg__(self) -> \"RandomVariable\": return RandomVariable( shape=self.shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state),", "integer-valued random variables, the :attr:`median` might lie in between two", "median=median, mean=mean, cov=cov, var=var, std=std, entropy=entropy, ) def pmf(self, x:", "of its descendants. Parameters ---------- shape : Shape of realizations", "-> np.dtype: \"\"\"Data type of (elements of) a realization of", "return values of the same dtype as the \" f\"random", "Binary arithmetic operations __array_ufunc__ = None \"\"\" This prevents numpy", "Make immutable if isinstance(var, np.ndarray): var.setflags(write=False) return var @cached_property def", "not isinstance(in_support, bool): raise ValueError( f\"The function `in_support` must return", "NotImplementedError( f\"Neither the `logpmf` nor the `pmf` of the discrete", "else (), dtype=self.__moment_dtype, ) # Make immutable if isinstance(cov, np.ndarray):", "variable. From the definition it follows that the quantile function", "entropy def in_support(self, x: _ValueType) -> bool: if self.__in_support is", "in ProbNum have Dirac or Gaussian measure. Instances of :class:`RandomVariable`", "{value} of type {type(value)}.\" ) from err else: raise TypeError(", "\"\"\" Draw realizations from a random variable. Parameters ---------- size", "self.__logcdf(self._as_value_type(x)) ) elif self.__cdf is not None: logcdf = np.log(self.__cdf(x))", "truediv return truediv(self, other) def __rtruediv__(self, other: Any) -> \"RandomVariable\":", "pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import pow_ return pow_(other, self) @staticmethod", "SN)` is the :attr:`shape` of the random variable. The pdf", "Cumulative distribution function. 
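
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original module): the base class above is
# concrete, so a random variable can be assembled from plain callables alone.
# The helper name `_example_fair_coin` is hypothetical.
def _example_fair_coin() -> RandomVariable:
    """A fair coin flip on {0, 1}, built from closures."""
    return RandomVariable(
        shape=(),
        dtype=np.dtype(np.int_),
        sample=lambda size: np.random.randint(0, 2, size=size),
        mean=lambda: np.float_(0.5),
        var=lambda: np.float_(0.25),
    )


# e.g. `_example_fair_coin().mean` is 0.5, `.std` is derived from `var` via the
# generic square-root fallback, and `.sample(5)` draws five realizations.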
class DiscreteRandomVariable(RandomVariable[_ValueType]):
    def __init__(
        self,
        shape: ShapeArgType,
        dtype: DTypeArgType,
        random_state: Optional[RandomStateType] = None,
        parameters: Optional[Dict[str, Any]] = None,
        sample: Optional[Callable[[ShapeArgType], _ValueType]] = None,
        in_support: Optional[Callable[[_ValueType], bool]] = None,
        pmf: Optional[Callable[[_ValueType], np.float_]] = None,
        logpmf: Optional[Callable[[_ValueType], np.float_]] = None,
        cdf: Optional[Callable[[_ValueType], np.float_]] = None,
        logcdf: Optional[Callable[[_ValueType], np.float_]] = None,
        quantile: Optional[Callable[[FloatArgType], _ValueType]] = None,
        mode: Optional[Callable[[], _ValueType]] = None,
        median: Optional[Callable[[], _ValueType]] = None,
        mean: Optional[Callable[[], _ValueType]] = None,
        cov: Optional[Callable[[], _ValueType]] = None,
        var: Optional[Callable[[], _ValueType]] = None,
        std: Optional[Callable[[], _ValueType]] = None,
        entropy: Optional[Callable[[], np.float_]] = None,
    ):
        # Probability mass function
        self.__pmf = pmf
        self.__logpmf = logpmf

        super().__init__(
            shape=shape, dtype=dtype, random_state=random_state,
            parameters=parameters, sample=sample, in_support=in_support,
            cdf=cdf, logcdf=logcdf, quantile=quantile, mode=mode, median=median,
            mean=mean, cov=cov, var=var, std=std, entropy=entropy,
        )

    def pmf(self, x: _ValueType) -> np.float_:
        if self.__pmf is not None:
            return DiscreteRandomVariable._ensure_numpy_float("pmf", self.__pmf(x))
        elif self.__logpmf is not None:
            pmf = np.exp(self.__logpmf(x))
            assert isinstance(pmf, np.float_)
            return pmf
        else:
            raise NotImplementedError(
                f"Neither the `pmf` nor the `logpmf` of the discrete random "
                f"variable object with type `{type(self).__name__}` is implemented."
            )

    def logpmf(self, x: _ValueType) -> np.float_:
        if self.__logpmf is not None:
            return DiscreteRandomVariable._ensure_numpy_float(
                "logpmf", self.__logpmf(self._as_value_type(x))
            )
        elif self.__pmf is not None:
            logpmf = np.log(self.__pmf(self._as_value_type(x)))
            assert isinstance(logpmf, np.float_)
            return logpmf
        else:
            raise NotImplementedError(
                f"Neither the `logpmf` nor the `pmf` of the discrete random "
                f"variable object with type `{type(self).__name__}` is implemented."
            )


class ContinuousRandomVariable(RandomVariable[_ValueType]):
    def __init__(
        self,
        shape: ShapeArgType,
        dtype: DTypeArgType,
        random_state: Optional[RandomStateType] = None,
        parameters: Optional[Dict[str, Any]] = None,
        sample: Optional[Callable[[ShapeArgType], _ValueType]] = None,
        in_support: Optional[Callable[[_ValueType], bool]] = None,
        pdf: Optional[Callable[[_ValueType], np.float_]] = None,
        logpdf: Optional[Callable[[_ValueType], np.float_]] = None,
        cdf: Optional[Callable[[_ValueType], np.float_]] = None,
        logcdf: Optional[Callable[[_ValueType], np.float_]] = None,
        quantile: Optional[Callable[[FloatArgType], _ValueType]] = None,
        mode: Optional[Callable[[], _ValueType]] = None,
        median: Optional[Callable[[], _ValueType]] = None,
        mean: Optional[Callable[[], _ValueType]] = None,
        cov: Optional[Callable[[], _ValueType]] = None,
        var: Optional[Callable[[], _ValueType]] = None,
        std: Optional[Callable[[], _ValueType]] = None,
        entropy: Optional[Callable[[], np.float_]] = None,
    ):
        # Probability density function
        self.__pdf = pdf
        self.__logpdf = logpdf

        super().__init__(
            shape=shape, dtype=dtype, random_state=random_state,
            parameters=parameters, sample=sample, in_support=in_support,
            cdf=cdf, logcdf=logcdf, quantile=quantile, mode=mode, median=median,
            mean=mean, cov=cov, var=var, std=std, entropy=entropy,
        )

    def pdf(self, x: _ValueType) -> np.float_:
        """Probability density or mass function.

        Following the predominant convention in mathematics, we express pdfs with
        respect to the Lebesgue measure unless stated otherwise.

        Parameters
        ----------
        x : array-like
            Evaluation points of the probability density / mass function. The shape
            of this argument should be :code:`(..., S1, ..., SN)`, where
            :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable. The
            pdf evaluation will be broadcast over all additional dimensions.

        Returns
        -------
        p : array-like
            Value of the probability density / mass function at the given points.
        """
        if self.__pdf is not None:
            return ContinuousRandomVariable._ensure_numpy_float(
                "pdf", self.__pdf(self._as_value_type(x))
            )
        if self.__logpdf is not None:
            pdf = np.exp(self.__logpdf(self._as_value_type(x)))
            assert isinstance(pdf, np.float_)
            return pdf
        raise NotImplementedError(
            f"Neither the `pdf` nor the `logpdf` of the continuous random variable "
            f"object with type `{type(self).__name__}` is implemented."
        )

    def logpdf(self, x: _ValueType) -> np.float_:
        """Natural logarithm of the probability density function.

        Parameters
        ----------
        x : array-like
            Evaluation points of the log-probability density/mass function. The
            shape of this argument should be :code:`(..., S1, ..., SN)`, where
            :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable. The
            logpdf evaluation will be broadcast over all additional dimensions.

        Returns
        -------
        logp : array-like
            Value of the log-probability density / mass function at the given
            points.
        """
        if self.__logpdf is not None:
            return ContinuousRandomVariable._ensure_numpy_float(
                "logpdf", self.__logpdf(self._as_value_type(x))
            )
        elif self.__pdf is not None:
            logpdf = np.log(self.__pdf(self._as_value_type(x)))
            assert isinstance(logpdf, np.float_)
            return logpdf
        else:
            raise NotImplementedError(
                f"Neither the `logpdf` nor the `pdf` of the continuous random "
                f"variable object with type `{type(self).__name__}` is implemented."
            )
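
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original module): a scalar standard normal
# random variable built directly from density closures. The constants are the
# usual normal-density ones; the helper name `_example_standard_normal` is
# hypothetical.
def _example_standard_normal() -> ContinuousRandomVariable:
    return ContinuousRandomVariable(
        shape=(),
        dtype=np.dtype(np.float_),
        sample=lambda size: np.random.standard_normal(size=size),
        pdf=lambda x: np.exp(-0.5 * x ** 2) / np.sqrt(2.0 * np.pi),
        logpdf=lambda x: -0.5 * x ** 2 - 0.5 * np.log(2.0 * np.pi),
        mean=lambda: np.float_(0.0),
        var=lambda: np.float_(1.0),
    )


# The cached properties then behave as documented above, e.g.
# `_example_standard_normal().pdf(0.0)` is roughly 0.3989 and `.std` is 1.0,
# derived from `var` via the generic square-root fallback.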
[ "self.assertEqual('GET', request.method) self.assertEqual(None, request.body) parameters = request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'],", "expected_region) self.assertEqual(parameters['address'], expected_address) def testReleaseMultipleAddresses(self): expected_project = 'test_project' expected_addresses =", "expected_project = 'test_project' expected_region = 'test-region' expected_address = 'test_address' address", "2.0 (the \"License\"); # you may not use this file", "expected_project = 'test_project' submitted_region = 'test-region' set_flags = { 'project':", "json import unittest import gflags as flags from gcutil_lib import", "'project': expected_project, 'region': 'region-a', } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress',", "def testGetAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address' submitted_region =", "'test-region' set_flags = { 'project': expected_project, 'region': submitted_region, } command", "= 'test_address' expected_description = 'test address' expected_region = 'test-region' expected_source_address", "= self.mock.Respond('compute.addresses.delete', {}) command.Handle(address) request = call.GetRequest() self.assertEqual('DELETE', request.method) self.assertEqual(None,", "('projects/%s/regions/%s/addresses/%s' % (expected_project, expected_region, expected_address)) set_flags = { 'project': 'incorrect_project',", "# limitations under the License. \"\"\"Unit tests for address collection", "= 'test_address' submitted_region = 'test-region' set_flags = { 'project': expected_project,", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "command.api.version), data) def testGetAddressPrintEmptyUsers(self): expected_project = 'test_project' submitted_region = 'test-region'", "'releaseaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.delete', {}) command.Handle(expected_address) request = call.GetRequest()", "self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'], expected_address) def testReleaseAddressWithoutRegionFlag(self): expected_project =", "testGetAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address' submitted_region = 'test-region'", "self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(expected_region, request.parameters['region']) body = json.loads(request.body) self.assertEqual(body['name'],", "= 'test_address' expected_description = 'test address' submitted_region = 'test-region' expected_source_address", "address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address) request =", "path_initializer.InitSysPath() import json import unittest import gflags as flags from", "{}) command.Handle(address) request = call.GetRequest() self.assertEqual('DELETE', request.method) self.assertEqual(None, request.body) parameters", "gcutil_lib import address_cmds from gcutil_lib import gcutil_unittest from gcutil_lib import", "use this file except in compliance with the License. 
#", "address = ('projects/%s/regions/%s/addresses/%s' % (expected_project, expected_region, expected_address)) set_flags = {", "testReserveAddressPromptsForRegion(self): expected_project = 'test_project' expected_address = 'test_address' expected_description = 'test", "request = call.GetRequest() self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(submitted_region, request.parameters['region']) body", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "FLAGS = flags.FLAGS class AddressCmdsTest(gcutil_unittest.GcutilTestCase): def setUp(self): self.mock, self.api =", "gflags as flags from gcutil_lib import address_cmds from gcutil_lib import", "License. # You may obtain a copy of the License", "expected_project, 'region': 'region-a', } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags)", "tests for address collection commands.\"\"\" import path_initializer path_initializer.InitSysPath() import json", "= 'test-region' expected_address = 'test_address' address = ('projects/%s/regions/%s/addresses/%s' % (expected_project,", "= 'test-region' set_flags = { 'project': expected_project, 'region': submitted_region, }", "under the License is distributed on an \"AS IS\" BASIS,", "{}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('GET', request.method) self.assertEqual(None, request.body) parameters", "License for the specific language governing permissions and # limitations", "self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(submitted_region, request.parameters['region']) body = json.loads(request.body) self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'],", "= { 'v1': [ ('users', []) ], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion(", "# Copyright 2012 Google Inc. All Rights Reserved. # #", "request.method) self.assertEqual(None, request.body) parameters = request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], submitted_region)", "Reserved. 
# # Licensed under the Apache License, Version 2.0", "address_cmds.GetAddress, 'getaddress', set_flags=set_flags) data = command.GetDetailRow({'users': []}) expected_data = {", "from gcutil_lib import mock_lists FLAGS = flags.FLAGS class AddressCmdsTest(gcutil_unittest.GcutilTestCase): def", "request = call.GetRequest() self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(expected_region, request.parameters['region']) body", "= request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], expected_region) self.assertEqual(parameters['address'], expected_address) def testReleaseMultipleAddresses(self):", "request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'], expected_address) def testGetAddressPrintNonEmptyUsers(self): expected_project", "self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'], expected_address) def testGetAddressPrintNonEmptyUsers(self): expected_project =", "address' expected_region = 'test-region' expected_source_address = '123.123.123.1' set_flags = {", "set_flags=set_flags) call = self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('POST',", "= 'test_address' address = ('projects/%s/regions/%s/addresses/%s' % (expected_project, expected_region, expected_address)) set_flags", "'test-addresses-%02d' % x for x in xrange(100)] set_flags = {", "call.GetRequest() self.assertEqual('GET', request.method) self.assertEqual(None, request.body) parameters = request.parameters self.assertEqual(parameters['project'], expected_project)", "command = self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) data = command.GetDetailRow({'users': []})", "'test address' submitted_region = 'test-region' expected_source_address = '192.168.127.12' set_flags =", "self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(submitted_region, request.parameters['region']) body = json.loads(request.body) self.assertEqual(body['name'],", "{ 'project': expected_project, 'region': submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress,", "{}) for x in xrange(len(expected_addresses))] _, exceptions = command.Handle(*expected_addresses) self.assertEqual(0,", "AddressCmdsTest(gcutil_unittest.GcutilTestCase): def setUp(self): self.mock, self.api = mock_api.CreateApi(self.version) def testReserveAddressPromptsForRegion(self): expected_project", "in compliance with the License. 
# You may obtain a", "[]) ], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data) def testReleaseAddressGeneratesCorrectRequest(self):", "expected_source_address, } command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) call =", "{ 'v1': [ ('users', []) ], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data,", "software # distributed under the License is distributed on an", "set_flags = { 'project': expected_project, 'region': submitted_region, } command =", "_, exceptions = command.Handle(*expected_addresses) self.assertEqual(0, len(exceptions)) sorted_calls = sorted([call.GetRequest().parameters['address'] for", "import gflags as flags from gcutil_lib import address_cmds from gcutil_lib", "expected_project, 'description': expected_description, 'region': submitted_region, 'source_address': expected_source_address, } command =", "self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'], expected_address) def testReleaseAddressWithoutRegionFlag(self): expected_project = 'test_project' expected_region", "request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], expected_region) self.assertEqual(parameters['address'], expected_address) def testReleaseMultipleAddresses(self): expected_project", "'192.168.127.12' set_flags = { 'project': expected_project, 'description': expected_description, 'region': submitted_region,", "gcutil_lib import mock_api from gcutil_lib import mock_lists FLAGS = flags.FLAGS", "= 'test_project' expected_region = 'test-region' expected_address = 'test_address' address =", "call.GetRequest() self.assertEqual('DELETE', request.method) self.assertEqual(None, request.body) parameters = request.parameters self.assertEqual(parameters['project'], expected_project)", "command = self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.get', {})", "testReleaseAddressWithoutRegionFlag(self): expected_project = 'test_project' expected_region = 'test-region' expected_address = 'test_address'", "[self.mock.Respond('compute.addresses.delete', {}) for x in xrange(len(expected_addresses))] _, exceptions = command.Handle(*expected_addresses)", "set_flags=set_flags) call = self.mock.Respond('compute.addresses.delete', {}) command.Handle(address) request = call.GetRequest() self.assertEqual('DELETE',", "{}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('DELETE', request.method) self.assertEqual(None, request.body) parameters", "'fr-2']}) expected_data = { 'v1': [ ('users', ['fr-1', 'fr-2']) ],", "x in xrange(100)] set_flags = { 'project': expected_project, 'region': 'region-a',", "% (expected_project, expected_region, expected_address)) set_flags = { 'project': 'incorrect_project', }", "x in xrange(len(expected_addresses))] _, exceptions = command.Handle(*expected_addresses) self.assertEqual(0, len(exceptions)) sorted_calls", "'region-a', } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) calls =", "name=[expected_region]) call = self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('POST',", "= command.GetDetailRow({'users': []}) expected_data = { 'v1': [ ('users', [])", "set_flags=set_flags) data = 
command.GetDetailRow({'users': ['fr-1', 'fr-2']}) expected_data = { 'v1':", "self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], expected_region) self.assertEqual(parameters['address'], expected_address) def testReleaseMultipleAddresses(self): expected_project =", "'releaseaddress', set_flags=set_flags) calls = [self.mock.Respond('compute.addresses.delete', {}) for x in xrange(len(expected_addresses))]", "expected_project = 'test_project' expected_addresses = [ 'test-addresses-%02d' % x for", "for x in xrange(len(expected_addresses))] _, exceptions = command.Handle(*expected_addresses) self.assertEqual(0, len(exceptions))", "address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) mock_lists.GetSampleRegionListCall( command, self.mock, num_responses=1, name=[expected_region]) call =", "json.loads(request.body) self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'], expected_source_address) def testReserveAddressGeneratesCorrectRequest(self): expected_project", "'v1': [ ('users', []) ], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version),", "request.parameters['project']) self.assertEquals(submitted_region, request.parameters['region']) body = json.loads(request.body) self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'], expected_description)", "expected_project) self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'], expected_address) def testGetAddressPrintNonEmptyUsers(self): expected_project = 'test_project'", "self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'], expected_source_address) def testGetAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "[]}) expected_data = { 'v1': [ ('users', []) ], }", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "'getaddress', set_flags=set_flags) data = command.GetDetailRow({'users': []}) expected_data = { 'v1':", "the License. 
# You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "('users', ['fr-1', 'fr-2']) ], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data)", "to in writing, software # distributed under the License is", "def testReleaseAddressWithoutRegionFlag(self): expected_project = 'test_project' expected_region = 'test-region' expected_address =", "(expected_project, expected_region, expected_address)) set_flags = { 'project': 'incorrect_project', } command", "command.Handle(address) request = call.GetRequest() self.assertEqual('DELETE', request.method) self.assertEqual(None, request.body) parameters =", "# See the License for the specific language governing permissions", "testReleaseMultipleAddresses(self): expected_project = 'test_project' expected_addresses = [ 'test-addresses-%02d' % x", "gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data) def testReleaseAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address", "or agreed to in writing, software # distributed under the", "= command.GetDetailRow({'users': ['fr-1', 'fr-2']}) expected_data = { 'v1': [ ('users',", "required by applicable law or agreed to in writing, software", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "'project': expected_project, 'description': expected_description, 'source_address': expected_source_address, } command = self._CreateAndInitializeCommand(", "with the License. # You may obtain a copy of", "set_flags = { 'project': expected_project, 'description': expected_description, 'region': submitted_region, 'source_address':", "= { 'project': 'incorrect_project', } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress',", "command.GetDetailRow({'users': ['fr-1', 'fr-2']}) expected_data = { 'v1': [ ('users', ['fr-1',", "'project': expected_project, 'region': submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress',", "= request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'], expected_address) def testReleaseAddressWithoutRegionFlag(self):", "self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data) def testReleaseAddressGeneratesCorrectRequest(self): expected_project = 'test_project'", "import address_cmds from gcutil_lib import gcutil_unittest from gcutil_lib import mock_api", "compliance with the License. # You may obtain a copy", "All Rights Reserved. # # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "expected_project, 'region': submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags)", "distributed under the License is distributed on an \"AS IS\"", "Inc. All Rights Reserved. 
# # Licensed under the Apache", "[ ('users', ['fr-1', 'fr-2']) ], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version),", "self.assertEqual(None, request.body) parameters = request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], expected_region) self.assertEqual(parameters['address'],", "for call in calls]) self.assertEqual(expected_addresses, sorted_calls) if __name__ == '__main__':", "expected_description = 'test address' submitted_region = 'test-region' expected_source_address = '192.168.127.12'", "express or implied. # See the License for the specific", "except in compliance with the License. # You may obtain", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "= { 'project': expected_project, 'description': expected_description, 'region': submitted_region, 'source_address': expected_source_address,", "'reserveaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address) request = call.GetRequest()", "not use this file except in compliance with the License.", "self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.get', {}) command.Handle(expected_address) request", "call = self.mock.Respond('compute.addresses.delete', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('DELETE', request.method)", "'test_address' expected_description = 'test address' submitted_region = 'test-region' expected_source_address =", "expected_data, command.api.version), data) def testGetAddressPrintEmptyUsers(self): expected_project = 'test_project' submitted_region =", "writing, software # distributed under the License is distributed on", "for address collection commands.\"\"\" import path_initializer path_initializer.InitSysPath() import json import", "= { 'project': expected_project, 'description': expected_description, 'source_address': expected_source_address, } command", "you may not use this file except in compliance with", "self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'], expected_source_address) def testReserveAddressGeneratesCorrectRequest(self): expected_project =", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "import path_initializer path_initializer.InitSysPath() import json import unittest import gflags as", "{ 'project': expected_project, 'description': expected_description, 'region': submitted_region, 'source_address': expected_source_address, }", "'test_project' submitted_region = 'test-region' set_flags = { 'project': expected_project, 'region':", "= call.GetRequest() self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(submitted_region, request.parameters['region']) body =", "expected_address) def testReleaseAddressWithoutRegionFlag(self): expected_project = 'test_project' expected_region = 'test-region' expected_address", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "expected_source_address, } command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) mock_lists.GetSampleRegionListCall( command,", "= '123.123.123.1' set_flags = { 'project': expected_project, 'description': expected_description, 'source_address':", "import mock_api from gcutil_lib import mock_lists FLAGS = flags.FLAGS class", "def testReserveAddressPromptsForRegion(self): expected_project = 'test_project' expected_address = 'test_address' expected_description =", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.delete', {}) command.Handle(address) request", "'description': expected_description, 'region': submitted_region, 'source_address': expected_source_address, } command = self._CreateAndInitializeCommand(", "self.assertEqual(parameters['address'], expected_address) def testReleaseMultipleAddresses(self): expected_project = 'test_project' expected_addresses = [", "expected_project, 'description': expected_description, 'source_address': expected_source_address, } command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress,", "submitted_region = 'test-region' expected_source_address = '192.168.127.12' set_flags = { 'project':", "'test-region' expected_source_address = '123.123.123.1' set_flags = { 'project': expected_project, 'description':", "= { 'project': expected_project, 'region': submitted_region, } command = self._CreateAndInitializeCommand(", "json.loads(request.body) self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'], expected_source_address) def testGetAddressGeneratesCorrectRequest(self): expected_project", "calls = [self.mock.Respond('compute.addresses.delete', {}) for x in xrange(len(expected_addresses))] _, exceptions", "self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) data = command.GetDetailRow({'users': ['fr-1', 'fr-2']}) expected_data", "def testReleaseMultipleAddresses(self): expected_project = 'test_project' expected_addresses = [ 'test-addresses-%02d' %", "call.GetRequest() self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(expected_region, request.parameters['region']) body = json.loads(request.body)", "= command.Handle(*expected_addresses) self.assertEqual(0, len(exceptions)) sorted_calls = sorted([call.GetRequest().parameters['address'] for call in", "} self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data) def testReleaseAddressGeneratesCorrectRequest(self): expected_project =", "self.api = mock_api.CreateApi(self.version) def testReserveAddressPromptsForRegion(self): expected_project = 'test_project' expected_address =", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "num_responses=1, name=[expected_region]) call = self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address) request = call.GetRequest()", "command.Handle(expected_address) request = call.GetRequest() self.assertEqual('GET', request.method) self.assertEqual(None, request.body) parameters =", "{ 'project': 'incorrect_project', } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags)", "self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.delete', {}) command.Handle(expected_address) request", "the License is distributed on an \"AS IS\" BASIS, #", "address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) calls = [self.mock.Respond('compute.addresses.delete', {}) for x in", "data) def testGetAddressPrintEmptyUsers(self): expected_project = 'test_project' submitted_region = 'test-region' set_flags", "request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'], expected_address) def testReleaseAddressWithoutRegionFlag(self): expected_project", "import unittest import gflags as flags from gcutil_lib import address_cmds", "'test-region' expected_address = 'test_address' address = ('projects/%s/regions/%s/addresses/%s' % (expected_project, expected_region,", "{ 'project': expected_project, 'region': 'region-a', } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress,", "set_flags=set_flags) call = self.mock.Respond('compute.addresses.delete', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('DELETE',", "language governing permissions and # limitations under the License. \"\"\"Unit", "= 'test_project' expected_addresses = [ 'test-addresses-%02d' % x for x", "address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.delete', {}) command.Handle(expected_address) request =", "def setUp(self): self.mock, self.api = mock_api.CreateApi(self.version) def testReserveAddressPromptsForRegion(self): expected_project =", "'getaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.get', {}) command.Handle(expected_address) request = call.GetRequest()", "as flags from gcutil_lib import address_cmds from gcutil_lib import gcutil_unittest", "{ 'project': expected_project, 'region': submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.GetAddress,", "permissions and # limitations under the License. 
\"\"\"Unit tests for", "[ 'test-addresses-%02d' % x for x in xrange(100)] set_flags =", "} command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) mock_lists.GetSampleRegionListCall( command, self.mock,", "law or agreed to in writing, software # distributed under", "= sorted([call.GetRequest().parameters['address'] for call in calls]) self.assertEqual(expected_addresses, sorted_calls) if __name__", "xrange(100)] set_flags = { 'project': expected_project, 'region': 'region-a', } command", "expected_address = 'test_address' submitted_region = 'test-region' set_flags = { 'project':", "expected_address = 'test_address' expected_description = 'test address' submitted_region = 'test-region'", "'test_address' expected_description = 'test address' expected_region = 'test-region' expected_source_address =", "testGetAddressPrintNonEmptyUsers(self): expected_project = 'test_project' submitted_region = 'test-region' set_flags = {", "'incorrect_project', } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) call =", "= self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address)", "sorted([call.GetRequest().parameters['address'] for call in calls]) self.assertEqual(expected_addresses, sorted_calls) if __name__ ==", "= 'test_project' expected_address = 'test_address' submitted_region = 'test-region' set_flags =", "command.api.version), data) def testReleaseAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address'", "command.GetDetailRow({'users': []}) expected_data = { 'v1': [ ('users', []) ],", "set_flags=set_flags) calls = [self.mock.Respond('compute.addresses.delete', {}) for x in xrange(len(expected_addresses))] _,", "= 'test address' submitted_region = 'test-region' expected_source_address = '192.168.127.12' set_flags", "= { 'v1': [ ('users', ['fr-1', 'fr-2']) ], } self.assertEquals(", "submitted_region) self.assertEqual(parameters['address'], expected_address) def testReleaseAddressWithoutRegionFlag(self): expected_project = 'test_project' expected_region =", "address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.delete', {}) command.Handle(address) request =", "('users', []) ], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data) def", "'project': expected_project, 'description': expected_description, 'region': submitted_region, 'source_address': expected_source_address, } command", "may obtain a copy of the License at # #", "set_flags = { 'project': expected_project, 'description': expected_description, 'source_address': expected_source_address, }", "} command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.insert',", "expected_description = 'test address' expected_region = 'test-region' expected_source_address = '123.123.123.1'", "under the License. 
\"\"\"Unit tests for address collection commands.\"\"\" import", "request.method) self.assertEqual(None, request.body) parameters = request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], expected_region)", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "'test-region' expected_source_address = '192.168.127.12' set_flags = { 'project': expected_project, 'description':", "self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address) request", "expected_region = 'test-region' expected_address = 'test_address' address = ('projects/%s/regions/%s/addresses/%s' %", "command = self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) data = command.GetDetailRow({'users': ['fr-1',", "self.assertEquals(body['address'], expected_source_address) def testReserveAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address'", "may not use this file except in compliance with the", "set_flags=set_flags) mock_lists.GetSampleRegionListCall( command, self.mock, num_responses=1, name=[expected_region]) call = self.mock.Respond('compute.addresses.insert', {})", "'source_address': expected_source_address, } command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) mock_lists.GetSampleRegionListCall(", "data = command.GetDetailRow({'users': ['fr-1', 'fr-2']}) expected_data = { 'v1': [", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "this file except in compliance with the License. # You", "set_flags=set_flags) data = command.GetDetailRow({'users': []}) expected_data = { 'v1': [", "= self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.get', {}) command.Handle(expected_address)", "= '192.168.127.12' set_flags = { 'project': expected_project, 'description': expected_description, 'region':", "['fr-1', 'fr-2']) ], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data) def", "} command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) calls = [self.mock.Respond('compute.addresses.delete',", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "request = call.GetRequest() self.assertEqual('GET', request.method) self.assertEqual(None, request.body) parameters = request.parameters", "call = self.mock.Respond('compute.addresses.get', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('GET', request.method)", "'test_project' expected_address = 'test_address' submitted_region = 'test-region' set_flags = {", "# # Licensed under the Apache License, Version 2.0 (the", "file except in compliance with the License. 
# You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "= call.GetRequest() self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(expected_region, request.parameters['region']) body =", "expected_project, 'region': submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags)", "mock_api from gcutil_lib import mock_lists FLAGS = flags.FLAGS class AddressCmdsTest(gcutil_unittest.GcutilTestCase):", "= call.GetRequest() self.assertEqual('DELETE', request.method) self.assertEqual(None, request.body) parameters = request.parameters self.assertEqual(parameters['project'],", "= self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.delete', {}) command.Handle(expected_address)", "self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) calls = [self.mock.Respond('compute.addresses.delete', {}) for x", "gcutil_lib import gcutil_unittest from gcutil_lib import mock_api from gcutil_lib import", "'project': 'incorrect_project', } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) call", "len(exceptions)) sorted_calls = sorted([call.GetRequest().parameters['address'] for call in calls]) self.assertEqual(expected_addresses, sorted_calls)", "call in calls]) self.assertEqual(expected_addresses, sorted_calls) if __name__ == '__main__': unittest.main(testLoader=gcutil_unittest.GcutilLoader())", "{ 'project': expected_project, 'description': expected_description, 'source_address': expected_source_address, } command =", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "def testReserveAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address' expected_description =", "= self.mock.Respond('compute.addresses.get', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('GET', request.method) self.assertEqual(None,", "set_flags = { 'project': 'incorrect_project', } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress,", "in xrange(100)] set_flags = { 'project': expected_project, 'region': 'region-a', }", "import mock_lists FLAGS = flags.FLAGS class AddressCmdsTest(gcutil_unittest.GcutilTestCase): def setUp(self): self.mock,", "License. 
\"\"\"Unit tests for address collection commands.\"\"\" import path_initializer path_initializer.InitSysPath()", "expected_project) self.assertEqual(parameters['region'], expected_region) self.assertEqual(parameters['address'], expected_address) def testReleaseMultipleAddresses(self): expected_project = 'test_project'", "command, self.mock, num_responses=1, name=[expected_region]) call = self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address) request", "expected_address = 'test_address' expected_description = 'test address' expected_region = 'test-region'", "xrange(len(expected_addresses))] _, exceptions = command.Handle(*expected_addresses) self.assertEqual(0, len(exceptions)) sorted_calls = sorted([call.GetRequest().parameters['address']", "request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(submitted_region, request.parameters['region']) body = json.loads(request.body) self.assertEqual(body['name'], expected_address)", "[ ('users', []) ], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data)", "'releaseaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.delete', {}) command.Handle(address) request = call.GetRequest()", "Google Inc. All Rights Reserved. # # Licensed under the", "= self.mock.Respond('compute.addresses.delete', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('DELETE', request.method) self.assertEqual(None,", "self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data) def testGetAddressPrintEmptyUsers(self): expected_project = 'test_project'", "expected_address)) set_flags = { 'project': 'incorrect_project', } command = self._CreateAndInitializeCommand(", "def testReleaseAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address' submitted_region =", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "expected_source_address = '192.168.127.12' set_flags = { 'project': expected_project, 'description': expected_description,", "expected_project = 'test_project' expected_address = 'test_address' submitted_region = 'test-region' set_flags", "= ('projects/%s/regions/%s/addresses/%s' % (expected_project, expected_region, expected_address)) set_flags = { 'project':", "= request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'], expected_address) def testGetAddressPrintNonEmptyUsers(self):", "or implied. # See the License for the specific language", "request = call.GetRequest() self.assertEqual('DELETE', request.method) self.assertEqual(None, request.body) parameters = request.parameters", "Rights Reserved. # # Licensed under the Apache License, Version", "command.Handle(expected_address) request = call.GetRequest() self.assertEqual('DELETE', request.method) self.assertEqual(None, request.body) parameters =", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) mock_lists.GetSampleRegionListCall( command, self.mock, num_responses=1,", "set_flags=set_flags) call = self.mock.Respond('compute.addresses.get', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('GET',", "'test_project' expected_addresses = [ 'test-addresses-%02d' % x for x in", "for x in xrange(100)] set_flags = { 'project': expected_project, 'region':", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "'source_address': expected_source_address, } command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) call", "submitted_region = 'test-region' set_flags = { 'project': expected_project, 'region': submitted_region,", "'test_project' expected_address = 'test_address' expected_description = 'test address' expected_region =", "expected_project) self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'], expected_address) def testReleaseAddressWithoutRegionFlag(self): expected_project = 'test_project'", "expected_addresses = [ 'test-addresses-%02d' % x for x in xrange(100)]", "gcutil_lib import mock_lists FLAGS = flags.FLAGS class AddressCmdsTest(gcutil_unittest.GcutilTestCase): def setUp(self):", "self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) data = command.GetDetailRow({'users': []}) expected_data =", "self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) mock_lists.GetSampleRegionListCall( command, self.mock, num_responses=1, name=[expected_region]) call", "x for x in xrange(100)] set_flags = { 'project': expected_project,", "expected_address = 'test_address' address = ('projects/%s/regions/%s/addresses/%s' % (expected_project, expected_region, expected_address))", "mock_lists FLAGS = flags.FLAGS class AddressCmdsTest(gcutil_unittest.GcutilTestCase): def setUp(self): self.mock, self.api", "(the \"License\"); # you may not use this file except", "'region': submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) call", "# you may not use this file except in compliance", "self.assertEqual(0, len(exceptions)) sorted_calls = sorted([call.GetRequest().parameters['address'] for call in calls]) self.assertEqual(expected_addresses,", "'test_address' submitted_region = 'test-region' set_flags = { 'project': expected_project, 'region':", "expected_description) self.assertEquals(body['address'], expected_source_address) def testReserveAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address =", "flags from gcutil_lib import address_cmds from gcutil_lib import gcutil_unittest from", "commands.\"\"\" import path_initializer path_initializer.InitSysPath() import json import unittest import gflags", "} command = self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.get',", "# # Unless required by applicable law or agreed to", "self.assertEqual(parameters['address'], expected_address) def testGetAddressPrintNonEmptyUsers(self): expected_project = 'test_project' submitted_region = 'test-region'", "obtain a copy of the License at # # 
http://www.apache.org/licenses/LICENSE-2.0", "expected_data, command.api.version), data) def testReleaseAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address =", "Version 2.0 (the \"License\"); # you may not use this", "self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'], expected_source_address) def testGetAddressGeneratesCorrectRequest(self): expected_project =", "self.assertEquals(expected_region, request.parameters['region']) body = json.loads(request.body) self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'],", "data) def testReleaseAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address' submitted_region", "'test_address' address = ('projects/%s/regions/%s/addresses/%s' % (expected_project, expected_region, expected_address)) set_flags =", "implied. # See the License for the specific language governing", "expected_address) def testReleaseMultipleAddresses(self): expected_project = 'test_project' expected_addresses = [ 'test-addresses-%02d'", "under the Apache License, Version 2.0 (the \"License\"); # you", "command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.delete', {})", "request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(expected_region, request.parameters['region']) body = json.loads(request.body) self.assertEqual(body['name'], expected_address)", "self.mock.Respond('compute.addresses.delete', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('DELETE', request.method) self.assertEqual(None, request.body)", "address collection commands.\"\"\" import path_initializer path_initializer.InitSysPath() import json import unittest", "by applicable law or agreed to in writing, software #", "= self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('POST', request.method) self.assertEqual(expected_project,", "expected_description, 'region': submitted_region, 'source_address': expected_source_address, } command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress,", "= mock_api.CreateApi(self.version) def testReserveAddressPromptsForRegion(self): expected_project = 'test_project' expected_address = 'test_address'", "self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'], expected_address) def testGetAddressPrintNonEmptyUsers(self): expected_project = 'test_project' submitted_region", "body = json.loads(request.body) self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'], expected_source_address) def", "= 'test_project' submitted_region = 'test-region' set_flags = { 'project': expected_project,", "submitted_region, 'source_address': expected_source_address, } command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags)", "expected_address) self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'], expected_source_address) def testReserveAddressGeneratesCorrectRequest(self): expected_project = 'test_project'", "address_cmds from gcutil_lib import 
gcutil_unittest from gcutil_lib import mock_api from", "'description': expected_description, 'source_address': expected_source_address, } command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress',", "= json.loads(request.body) self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'], expected_source_address) def testGetAddressGeneratesCorrectRequest(self):", "'test_project' expected_address = 'test_address' expected_description = 'test address' submitted_region =", "address' submitted_region = 'test-region' expected_source_address = '192.168.127.12' set_flags = {", "request.parameters['project']) self.assertEquals(expected_region, request.parameters['region']) body = json.loads(request.body) self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'], expected_description)", "self.mock.Respond('compute.addresses.delete', {}) command.Handle(address) request = call.GetRequest() self.assertEqual('DELETE', request.method) self.assertEqual(None, request.body)", "'region': submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) call", "= call.GetRequest() self.assertEqual('GET', request.method) self.assertEqual(None, request.body) parameters = request.parameters self.assertEqual(parameters['project'],", "= [self.mock.Respond('compute.addresses.delete', {}) for x in xrange(len(expected_addresses))] _, exceptions =", "], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data) def testGetAddressPrintEmptyUsers(self): expected_project", "submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) data =", "import gcutil_unittest from gcutil_lib import mock_api from gcutil_lib import mock_lists", "parameters = request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'], expected_address) def", "'project': expected_project, 'region': submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress',", "in xrange(len(expected_addresses))] _, exceptions = command.Handle(*expected_addresses) self.assertEqual(0, len(exceptions)) sorted_calls =", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "data = command.GetDetailRow({'users': []}) expected_data = { 'v1': [ ('users',", "self.assertEqual(parameters['region'], expected_region) self.assertEqual(parameters['address'], expected_address) def testReleaseMultipleAddresses(self): expected_project = 'test_project' expected_addresses", "% x for x in xrange(100)] set_flags = { 'project':", "Unless required by applicable law or agreed to in writing,", "expected_source_address) def testGetAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address' submitted_region", "command.Handle(expected_address) request = call.GetRequest() self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(submitted_region, request.parameters['region'])", "self.assertEqual('DELETE', request.method) self.assertEqual(None, request.body) parameters = request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'],", "from gcutil_lib import 
gcutil_unittest from gcutil_lib import mock_api from gcutil_lib", "= { 'project': expected_project, 'region': 'region-a', } command = self._CreateAndInitializeCommand(", "command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) calls = [self.mock.Respond('compute.addresses.delete', {})", "command.Handle(expected_address) request = call.GetRequest() self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(expected_region, request.parameters['region'])", "the specific language governing permissions and # limitations under the", "testReleaseAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address' submitted_region = 'test-region'", "from gcutil_lib import mock_api from gcutil_lib import mock_lists FLAGS =", "expected_source_address = '123.123.123.1' set_flags = { 'project': expected_project, 'description': expected_description,", "request.body) parameters = request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], expected_region) self.assertEqual(parameters['address'], expected_address)", "'getaddress', set_flags=set_flags) data = command.GetDetailRow({'users': ['fr-1', 'fr-2']}) expected_data = {", "applicable law or agreed to in writing, software # distributed", "expected_project = 'test_project' expected_address = 'test_address' expected_description = 'test address'", "Copyright 2012 Google Inc. All Rights Reserved. # # Licensed", "self.assertEquals(body['address'], expected_source_address) def testGetAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address'", "= json.loads(request.body) self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'], expected_source_address) def testReserveAddressGeneratesCorrectRequest(self):", "in writing, software # distributed under the License is distributed", "testGetAddressPrintEmptyUsers(self): expected_project = 'test_project' submitted_region = 'test-region' set_flags = {", "self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'], expected_source_address) def testReserveAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address", "call = self.mock.Respond('compute.addresses.delete', {}) command.Handle(address) request = call.GetRequest() self.assertEqual('DELETE', request.method)", "path_initializer path_initializer.InitSysPath() import json import unittest import gflags as flags", "'123.123.123.1' set_flags = { 'project': expected_project, 'description': expected_description, 'source_address': expected_source_address,", "= self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) calls = [self.mock.Respond('compute.addresses.delete', {}) for", "testReserveAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address' expected_description = 'test", "expected_region, expected_address)) set_flags = { 'project': 'incorrect_project', } command =", "the License. 
\"\"\"Unit tests for address collection commands.\"\"\" import path_initializer", "address_cmds.GetAddress, 'getaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.get', {}) command.Handle(expected_address) request =", "expected_data = { 'v1': [ ('users', ['fr-1', 'fr-2']) ], }", "'test_project' expected_region = 'test-region' expected_address = 'test_address' address = ('projects/%s/regions/%s/addresses/%s'", "\"\"\"Unit tests for address collection commands.\"\"\" import path_initializer path_initializer.InitSysPath() import", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License, Version 2.0 (the \"License\"); # you may not use", "} command = self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) data = command.GetDetailRow({'users':", "= 'test_project' expected_address = 'test_address' expected_description = 'test address' expected_region", "request.body) parameters = request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'], expected_address)", "# You may obtain a copy of the License at", "collection commands.\"\"\" import path_initializer path_initializer.InitSysPath() import json import unittest import", "= self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) data = command.GetDetailRow({'users': ['fr-1', 'fr-2']})", "self.mock, self.api = mock_api.CreateApi(self.version) def testReserveAddressPromptsForRegion(self): expected_project = 'test_project' expected_address", "self.mock, num_responses=1, name=[expected_region]) call = self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address) request =", "{}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(expected_region,", "'fr-2']) ], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data) def testGetAddressPrintEmptyUsers(self):", "} self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data) def testGetAddressPrintEmptyUsers(self): expected_project =", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "call.GetRequest() self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(submitted_region, request.parameters['region']) body = json.loads(request.body)", "expected_description) self.assertEquals(body['address'], expected_source_address) def testGetAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address =", "expected_description, 'source_address': expected_source_address, } command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags)", "mock_lists.GetSampleRegionListCall( command, self.mock, num_responses=1, name=[expected_region]) call = self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address)", "exceptions = command.Handle(*expected_addresses) self.assertEqual(0, len(exceptions)) sorted_calls = sorted([call.GetRequest().parameters['address'] for call", "= 'test address' expected_region = 'test-region' expected_source_address = '123.123.123.1' set_flags", "= self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', 
set_flags=set_flags) mock_lists.GetSampleRegionListCall( command, self.mock, num_responses=1, name=[expected_region])", "self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project'])", "the License for the specific language governing permissions and #", "Apache License, Version 2.0 (the \"License\"); # you may not", "expected_address) def testGetAddressPrintNonEmptyUsers(self): expected_project = 'test_project' submitted_region = 'test-region' set_flags", "either express or implied. # See the License for the", "address_cmds.GetAddress, 'getaddress', set_flags=set_flags) data = command.GetDetailRow({'users': ['fr-1', 'fr-2']}) expected_data =", "], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data) def testReleaseAddressGeneratesCorrectRequest(self): expected_project", "self.mock.Respond('compute.addresses.get', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('GET', request.method) self.assertEqual(None, request.body)", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "= flags.FLAGS class AddressCmdsTest(gcutil_unittest.GcutilTestCase): def setUp(self): self.mock, self.api = mock_api.CreateApi(self.version)", "submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) call =", "def testGetAddressPrintEmptyUsers(self): expected_project = 'test_project' submitted_region = 'test-region' set_flags =", "= [ 'test-addresses-%02d' % x for x in xrange(100)] set_flags", "governing permissions and # limitations under the License. \"\"\"Unit tests", "gcutil_unittest from gcutil_lib import mock_api from gcutil_lib import mock_lists FLAGS", "{ 'v1': [ ('users', ['fr-1', 'fr-2']) ], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion(", "self.assertEqual(parameters['address'], expected_address) def testReleaseAddressWithoutRegionFlag(self): expected_project = 'test_project' expected_region = 'test-region'", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "command.Handle(*expected_addresses) self.assertEqual(0, len(exceptions)) sorted_calls = sorted([call.GetRequest().parameters['address'] for call in calls])", "gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data) def testGetAddressPrintEmptyUsers(self): expected_project = 'test_project' submitted_region", "and # limitations under the License. 
\"\"\"Unit tests for address", "expected_data = { 'v1': [ ('users', []) ], } self.assertEquals(", "'region': 'region-a', } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) calls", "= 'test-region' expected_source_address = '192.168.127.12' set_flags = { 'project': expected_project,", "sorted_calls = sorted([call.GetRequest().parameters['address'] for call in calls]) self.assertEqual(expected_addresses, sorted_calls) if", "= 'test_project' expected_address = 'test_address' expected_description = 'test address' submitted_region", "} command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.delete',", "from gcutil_lib import address_cmds from gcutil_lib import gcutil_unittest from gcutil_lib", "\"License\"); # you may not use this file except in", "expected_region = 'test-region' expected_source_address = '123.123.123.1' set_flags = { 'project':", "{}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(submitted_region,", "'test address' expected_region = 'test-region' expected_source_address = '123.123.123.1' set_flags =", "'reserveaddress', set_flags=set_flags) mock_lists.GetSampleRegionListCall( command, self.mock, num_responses=1, name=[expected_region]) call = self.mock.Respond('compute.addresses.insert',", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "'region': submitted_region, 'source_address': expected_source_address, } command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress',", "parameters = request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], expected_region) self.assertEqual(parameters['address'], expected_address) def", "expected_address) self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'], expected_source_address) def testGetAddressGeneratesCorrectRequest(self): expected_project = 'test_project'", "set_flags = { 'project': expected_project, 'region': 'region-a', } command =", "= self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) data = command.GetDetailRow({'users': []}) expected_data", "import json import unittest import gflags as flags from gcutil_lib", "= 'test-region' expected_source_address = '123.123.123.1' set_flags = { 'project': expected_project,", "# distributed under the License is distributed on an \"AS", "flags.FLAGS class AddressCmdsTest(gcutil_unittest.GcutilTestCase): def setUp(self): self.mock, self.api = mock_api.CreateApi(self.version) def", "# Unless required by applicable law or agreed to in", "'v1': [ ('users', ['fr-1', 'fr-2']) ], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data,", "expected_source_address) def testReserveAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address' expected_description", "unittest import gflags as flags from gcutil_lib import address_cmds from", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "submitted_region) self.assertEqual(parameters['address'], expected_address) def testGetAddressPrintNonEmptyUsers(self): expected_project = 'test_project' submitted_region =", "limitations under the License. 
\"\"\"Unit tests for address collection commands.\"\"\"", "call = self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('POST', request.method)", "self.assertEqual(None, request.body) parameters = request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'],", "= self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.delete', {}) command.Handle(address)", "command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.insert', {})", "def testGetAddressPrintNonEmptyUsers(self): expected_project = 'test_project' submitted_region = 'test-region' set_flags =", "class AddressCmdsTest(gcutil_unittest.GcutilTestCase): def setUp(self): self.mock, self.api = mock_api.CreateApi(self.version) def testReserveAddressPromptsForRegion(self):", "You may obtain a copy of the License at #", "request.parameters['region']) body = json.loads(request.body) self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'], expected_source_address)", "'region': submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) data", "submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) call =", "self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(expected_region, request.parameters['region']) body = json.loads(request.body) self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'],", "mock_api.CreateApi(self.version) def testReserveAddressPromptsForRegion(self): expected_project = 'test_project' expected_address = 'test_address' expected_description", "the Apache License, Version 2.0 (the \"License\"); # you may", "['fr-1', 'fr-2']}) expected_data = { 'v1': [ ('users', ['fr-1', 'fr-2'])", "2012 Google Inc. All Rights Reserved. # # Licensed under", "self.assertEquals(submitted_region, request.parameters['region']) body = json.loads(request.body) self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'],", "setUp(self): self.mock, self.api = mock_api.CreateApi(self.version) def testReserveAddressPromptsForRegion(self): expected_project = 'test_project'" ]
[ "), migrations.AlterField( model_name='competitor', name='min2', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True, size=9, verbose_name='minuto 2'),", "dependencies = [ ('vote', '0004_auto_20210131_1621'), ] operations = [ migrations.AlterField(", "migrations.AlterField( model_name='competitor', name='min1', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True, size=9, verbose_name='minuto 1'), ),", "model_name='competitor', name='min2', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True, size=9, verbose_name='minuto 2'), ), ]", "] operations = [ migrations.AlterField( model_name='competitor', name='min1', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True,", "Migration(migrations.Migration): dependencies = [ ('vote', '0004_auto_20210131_1621'), ] operations = [", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('vote',", "null=True, size=9, verbose_name='minuto 1'), ), migrations.AlterField( model_name='competitor', name='min2', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True,", "blank=True, null=True, size=9, verbose_name='minuto 1'), ), migrations.AlterField( model_name='competitor', name='min2', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(),", "import django.contrib.postgres.fields from django.db import migrations, models class Migration(migrations.Migration): dependencies", "[ ('vote', '0004_auto_20210131_1621'), ] operations = [ migrations.AlterField( model_name='competitor', name='min1',", "1'), ), migrations.AlterField( model_name='competitor', name='min2', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True, size=9, verbose_name='minuto", "= [ ('vote', '0004_auto_20210131_1621'), ] operations = [ migrations.AlterField( model_name='competitor',", "Django 3.1.5 on 2021-02-05 00:00 import django.contrib.postgres.fields from django.db import", "migrations, models class Migration(migrations.Migration): dependencies = [ ('vote', '0004_auto_20210131_1621'), ]", "models class Migration(migrations.Migration): dependencies = [ ('vote', '0004_auto_20210131_1621'), ] operations", "# Generated by Django 3.1.5 on 2021-02-05 00:00 import django.contrib.postgres.fields", "00:00 import django.contrib.postgres.fields from django.db import migrations, models class Migration(migrations.Migration):", "'0004_auto_20210131_1621'), ] operations = [ migrations.AlterField( model_name='competitor', name='min1', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True,", "3.1.5 on 2021-02-05 00:00 import django.contrib.postgres.fields from django.db import migrations,", "verbose_name='minuto 1'), ), migrations.AlterField( model_name='competitor', name='min2', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True, size=9,", "Generated by Django 3.1.5 on 2021-02-05 00:00 import django.contrib.postgres.fields from", "2021-02-05 00:00 import django.contrib.postgres.fields from django.db import migrations, models class", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "class 
Migration(migrations.Migration): dependencies = [ ('vote', '0004_auto_20210131_1621'), ] operations =", "operations = [ migrations.AlterField( model_name='competitor', name='min1', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True, size=9,", "name='min1', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True, size=9, verbose_name='minuto 1'), ), migrations.AlterField( model_name='competitor',", "migrations.AlterField( model_name='competitor', name='min2', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True, size=9, verbose_name='minuto 2'), ),", "by Django 3.1.5 on 2021-02-05 00:00 import django.contrib.postgres.fields from django.db", "on 2021-02-05 00:00 import django.contrib.postgres.fields from django.db import migrations, models", "size=9, verbose_name='minuto 1'), ), migrations.AlterField( model_name='competitor', name='min2', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True,", "model_name='competitor', name='min1', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True, size=9, verbose_name='minuto 1'), ), migrations.AlterField(", "('vote', '0004_auto_20210131_1621'), ] operations = [ migrations.AlterField( model_name='competitor', name='min1', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(),", "<filename>vote/migrations/0005_auto_20210204_1900.py<gh_stars>1-10 # Generated by Django 3.1.5 on 2021-02-05 00:00 import", "django.contrib.postgres.fields from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True, size=9, verbose_name='minuto 1'), ), migrations.AlterField( model_name='competitor', name='min2',", "= [ migrations.AlterField( model_name='competitor', name='min1', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True, size=9, verbose_name='minuto", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('vote', '0004_auto_20210131_1621'),", "[ migrations.AlterField( model_name='competitor', name='min1', field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True, size=9, verbose_name='minuto 1')," ]
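The two AlterField operations rewrite min1 and min2 as Postgres arrays of up to nine PositiveSmallIntegerField values. For context, a model definition along the following lines would yield exactly this migration; the Competitor class here is inferred from the operations above, since the app's actual models.py is not part of this file.

from django.contrib.postgres.fields import ArrayField
from django.db import models


class Competitor(models.Model):
    # Up to nine small positive integers per field; blank/null allowed,
    # matching the blank=True, null=True, size=9 options in the migration.
    min1 = ArrayField(models.PositiveSmallIntegerField(), size=9,
                      blank=True, null=True, verbose_name='minuto 1')
    min2 = ArrayField(models.PositiveSmallIntegerField(), size=9,
                      blank=True, null=True, verbose_name='minuto 2')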
[ "sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from tools.shared import asstr logger = logging.getLogger('wasm-sourcemap') def", "'--output', help='output source map') parser.add_argument('-p', '--prefix', nargs='*', help='replace source debug", "DWARF information from %s' % wasm) if not os.path.exists(options.dwarfdump): logger.error('llvm-dwarfdump", "# # Address Line Column File ISA Discriminator Flags #", "column return OrderedDict([('version', 3), ('names', []), ('sources', sources), ('sourcesContent', sources_content),", "size field) fn_size_length = floor(log(entries[cur_entry]['address'] - fn_start + 1, 128))", "section') section_name = \"sourceMappingURL\" section_content = encode_uint_var(len(section_name)) + section_name +", "last_address = 0 last_source_id = 0 last_line = 1 last_column", "source text SourceMapPrefixes = namedtuple('SourceMapPrefixes', 'sources, load') def encode_vlq(n): VLQ_CHARS", "+ wasm[section_start:pos] return stripped def encode_uint_var(n): result = bytearray() while", "column - last_column mappings.append(encode_vlq(address_delta) + encode_vlq(source_id_delta) + encode_vlq(line_delta) + encode_vlq(column_delta))", "= p.split('=') prefixes.append({'prefix': prefix, 'replacement': replacement}) else: prefixes.append({'prefix': p, 'replacement':", "append_source_mapping(wasm, options.source_map_url) if options.w: logger.debug('Saving wasm to %s' % options.w)", "%s' % load_name) sources_content.append(None) else: source_id = sources_map[source_name] address_delta =", "\"playground.c\" # dir_index: 1 # mod_time: 0x00000000 # length: 0x00000000", "infile.read() sources_content.append(source_content) except IOError: print('Failed to read source: %s' %", "for dir in re.finditer(r\"include_directories\\[\\s*(\\d+)\\] = \\\"([^\\\"]*)\", line_chunk): include_directories[dir.group(1)] = dir.group(2)", "# file_names[ 1]: # name: \"playground.c\" # dir_index: 1 #", "None else \"\" line_chunk = debug_line_chunks[i + 1] # include_directories[", "block_start = cur_entry def read_dwarf_entries(wasm, options): if options.dwarfdump_output: output =", "8 while pos < len(wasm): section_id, pos_ = read_var_uint(wasm, pos)", "and linking sections') parser.add_argument('-u', '--source-map-url', nargs='?', help='specifies sourceMappingURL section contest')", "def read_dwarf_entries(wasm, options): if options.dwarfdump_output: output = open(options.dwarfdump_output, 'r').read() elif", "help='output source map') parser.add_argument('-p', '--prefix', nargs='*', help='replace source debug filename", "collect_sources: load_name = prefixes.load.resolve(file_name) try: with open(load_name, 'r') as infile:", "logger.debug('Append sourceMappingURL section') section_name = \"sourceMappingURL\" section_content = encode_uint_var(len(section_name)) +", "name.startswith(\".debug_\"): continue # skip debug related sections stripped = stripped", "name.startswith(\"reloc..debug_\") or name.startswith(\".debug_\"): continue # skip debug related sections stripped", "== entry['address']: # last entry has the same address, reusing", "| (n & 127)) n = n >> 7 result.append(n)", "source_name not in sources_map: source_id = len(sources) sources_map[source_name] = source_id", "not entry['eos']: entries.append(entry) else: # move end of function to", "31: result = result + VLQ_CHARS[32 + (x & 31)]", "0 cur_entry = 0 while cur_entry < len(entries): if not", "p in self.prefixes: if name.startswith(p['prefix']): if p['replacement'] is None: result", "import logging from 
math import floor, log import os import", "encode_uint_var(0) + encode_uint_var(len(section_content)) + section_content def get_code_section_offset(wasm): logger.debug('Read sections index')", "END operator entry['address'] -= 1 if entries[-1]['address'] == entry['address']: #", "nargs='?', help='set output wasm file') parser.add_argument('-x', '--strip', action='store_true', help='removes debug", "logger.debug('Reading DWARF information from %s' % wasm) if not os.path.exists(options.dwarfdump):", "else: entries.append(entry) remove_dead_entries(entries) # return entries sorted by the address", "debug sections from a wasm file. \"\"\" import argparse from", "<< shift), pos def strip_debug_sections(wasm): logger.debug('Strip debug sections') pos =", "include_directories[file.group(3)] file_path = (dir + '/' if file.group(2)[0] != '/'", "file. \"\"\"Utility tools that extracts DWARF information encoded in a", "+ (x & 31)] x = x >> 5 return", "function size (including size field) fn_size_length = floor(log(entries[cur_entry]['address'] - fn_start", "with open(load_name, 'r') as infile: source_content = infile.read() sources_content.append(source_content) except", "read_var_uint(wasm, pos): n = 0 shift = 0 b =", "reserved. # Emscripten is available under two separate licenses, the", "file. \"\"\" import argparse from collections import OrderedDict, namedtuple import", "# Remove dead code debug info block. del entries[block_start:cur_entry +", "last_source_id line_delta = line - last_line column_delta = column -", "json.dump(map, outfile, separators=(',', ':')) if options.strip: wasm = strip_debug_sections(wasm) if", "VLQ_CHARS[x] def read_var_uint(wasm, pos): n = 0 shift = 0", "':')) if options.strip: wasm = strip_debug_sections(wasm) if options.source_map_url: wasm =", "prefixes self.cache = {} def resolve(self, name): if name in", "if options.strip: wasm = strip_debug_sections(wasm) if options.source_map_url: wasm = append_source_mapping(wasm,", "[] sources_content = [] if collect_sources else None mappings =", "strip_debug_sections(wasm) if options.source_map_url: wasm = append_source_mapping(wasm, options.source_map_url) if options.w: logger.debug('Saving", "pos def strip_debug_sections(wasm): logger.debug('Strip debug sections') pos = 8 stripped", "print('Failed to read source: %s' % load_name) sources_content.append(None) else: source_id", "fn_size_length = floor(log(entries[cur_entry]['address'] - fn_start + 1, 128)) + 1", "a wasm source map. Additionally, it can collect original sources,", "= (dir + '/' if file.group(2)[0] != '/' else '')", "+ code_section_offset file_name = entry['file'] source_name = prefixes.sources.resolve(file_name) if source_name", "collect original sources, change files prefixes, and strip debug sections", "as a wasm source map. Additionally, it can collect original", "sections') parser.add_argument('-u', '--source-map-url', nargs='?', help='specifies sourceMappingURL section contest') parser.add_argument('--dwarfdump', help=\"path", "University of Illinois/NCSA Open Source License. 
Both these licenses can", "if source_name not in sources_map: source_id = len(sources) sources_map[source_name] =", "self.cache = {} def resolve(self, name): if name in self.cache:", "= source_id sources.append(source_name) if collect_sources: load_name = prefixes.load.resolve(file_name) try: with", "infile: wasm = infile.read() entries = read_dwarf_entries(wasm_input, options) code_section_offset =", "+ encode_uint_var(len(url)) + url return wasm + encode_uint_var(0) + encode_uint_var(len(section_content))", "# last entry has the same address, reusing entries[-1]['eos'] =", "code debug info block. del entries[block_start:cur_entry + 1] cur_entry =", "= append_source_mapping(wasm, options.source_map_url) if options.w: logger.debug('Saving wasm to %s' %", "wasm[section_start:pos] return stripped def encode_uint_var(n): result = bytearray() while n", "result = name[len(p['prefix'])::] else: result = p['replacement'] + name[len(p['prefix'])::] break", "0 is_stmt # 0x0000000000000007 23 10 1 0 0 is_stmt", "section_size def remove_dead_entries(entries): # Remove entries for dead functions. It", "cur_entry += 1 continue fn_start = entries[block_start]['address'] # Calculate the", "if column == 0: column = 1 address = entry['address']", "= line last_column = column return OrderedDict([('version', 3), ('names', []),", "= prefixes.sources.resolve(file_name) if source_name not in sources_map: source_id = len(sources)", "< len(entries): if not entries[cur_entry]['eos']: cur_entry += 1 continue fn_start", "0 end_sequence # 0x0000000000000011 28 0 1 0 0 is_stmt", "pos = read_var_uint(wasm, pos_) if section_id == 10: return pos", "entries for dead functions. It is a heuristics to ignore", "line in re.finditer(r\"\\n0x([0-9a-f]+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)(.*?end_sequence)?\", line_chunk): entry = {'address': int(line.group(1), 16), 'line':", "= 1 for entry in entries: line = entry['line'] column", "+ stmt_list + r\"\\)\\s+\" + r\"DW_AT_comp_dir\\s+\\(\\\"([^\\\"]+)\", maybe_debug_info_content) comp_dir = comp_dir_match.group(1)", "import re from subprocess import Popen, PIPE import sys sys.path.insert(1,", "dir_index: 1 # mod_time: 0x00000000 # length: 0x00000000 # #", "ignore entries with line 0 if line == 0: continue", "= entry['file'] source_name = prefixes.sources.resolve(file_name) if source_name not in sources_map:", "options.source_map_url) if options.w: logger.debug('Saving wasm to %s' % options.w) with", "line_chunk): entry = {'address': int(line.group(1), 16), 'line': int(line.group(2)), 'column': int(line.group(3)),", "outfile: json.dump(map, outfile, separators=(',', ':')) if options.strip: wasm = strip_debug_sections(wasm)", "\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\" x = (n << 1) if n >= 0", "= read_var_uint(wasm, pos) section_size, pos = read_var_uint(wasm, pos_) if section_id", "if options.source_map_url: wasm = append_source_mapping(wasm, options.source_map_url) if options.w: logger.debug('Saving wasm", "'file': files[line.group(4)], 'eos': line.group(5) is not None} if not entry['eos']:", "== 10: return pos pos = pos + section_size def", "for dead functions. 
It is a heuristics to ignore data", "encode_uint_var(n): result = bytearray() while n > 127: result.append(128 |", "debug_line_chunks = re.split(r\"debug_line\\[(0x[0-9a-f]*)\\]\", asstr(output)) maybe_debug_info_content = debug_line_chunks[0] for i in", "--- ------------- ------------- # 0x0000000000000006 22 0 1 0 0", "filename prefix for reading sources from file system (see also", "'/' else '') + file.group(2) files[file.group(1)] = file_path for line", "def read_var_uint(wasm, pos): n = 0 shift = 0 b", "0 last_source_id = 0 last_line = 1 last_column = 1", "1 address = entry['address'] + code_section_offset file_name = entry['file'] source_name", "= entry['line'] column = entry['column'] # ignore entries with line", "Flags # ------------------ ------ ------ ------ --- ------------- ------------- #", "if collect_sources: load_name = prefixes.load.resolve(file_name) try: with open(load_name, 'r') as", "get_code_section_offset(wasm) prefixes = SourceMapPrefixes(sources=Prefixes(options.prefix), load=Prefixes(options.load_prefix)) logger.debug('Saving to %s' % options.output)", "source_id = len(sources) sources_map[source_name] = source_id sources.append(source_name) if collect_sources: load_name", "= debug_line_chunks[i + 1] # include_directories[ 1] = \"/Users/yury/Work/junk/sqlite-playground/src\" #", "open(load_name, 'r') as infile: source_content = infile.read() sources_content.append(source_content) except IOError:", "or --dwarfdump-output') sys.exit(1) entries = [] debug_line_chunks = re.split(r\"debug_line\\[(0x[0-9a-f]*)\\]\", asstr(output))", "1 0 0 end_sequence # 0x0000000000000011 28 0 1 0", "names that output to source maps JSON # - \"load\"", "- fn_start + 1, 128)) + 1 min_live_offset = 1", "[] for p in args: if '=' in p: prefix,", "1 if column == 0: column = 1 address =", "sources_content.append(source_content) except IOError: print('Failed to read source: %s' % load_name)", "= [] sources_map = {} last_address = 0 last_source_id =", "128: n = n | ((b - 128) << shift)", "1 for entry in entries: line = entry['line'] column =", "code_section_offset, prefixes, collect_sources): sources = [] sources_content = [] if", "logger.debug('Strip debug sections') pos = 8 stripped = wasm[:pos] while", "True else: entries.append(entry) remove_dead_entries(entries) # return entries sorted by the", "sorted by the address field return sorted(entries, key=lambda entry: entry['address'])", "address field return sorted(entries, key=lambda entry: entry['address']) def build_sourcemap(entries, code_section_offset,", "load_name) sources_content.append(None) else: source_id = sources_map[source_name] address_delta = address -", "used to load source text SourceMapPrefixes = namedtuple('SourceMapPrefixes', 'sources, load')", "text SourceMapPrefixes = namedtuple('SourceMapPrefixes', 'sources, load') def encode_vlq(n): VLQ_CHARS =", "os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from tools.shared import asstr logger = logging.getLogger('wasm-sourcemap') def parse_args():", "if section_id == 10: return pos pos = pos +", "licenses can be # found in the LICENSE file. \"\"\"Utility", "LICENSE file. 
\"\"\"Utility tools that extracts DWARF information encoded in", "entry['address']: # last entry has the same address, reusing entries[-1]['eos']", "128) << shift) b = ord(wasm[pos:pos + 1]) pos =", "result = name for p in self.prefixes: if name.startswith(p['prefix']): if", "the LLVM tools, and encodes it as a wasm source", "name.startswith(p['prefix']): if p['replacement'] is None: result = name[len(p['prefix'])::] else: result", "# - \"sources\" is for names that output to source", "sections') pos = 8 stripped = wasm[:pos] while pos <", "url return wasm + encode_uint_var(0) + encode_uint_var(len(section_content)) + section_content def", "0 is_stmt prologue_end # 0x000000000000000f 23 3 1 0 0", "% load_name) sources_content.append(None) else: source_id = sources_map[source_name] address_delta = address", "import json import logging from math import floor, log import", "3), ('names', []), ('sources', sources), ('sourcesContent', sources_content), ('mappings', ','.join(mappings))]) def", "& 127)) n = n >> 7 result.append(n) return bytes(result)", "can be # found in the LICENSE file. \"\"\"Utility tools", "nargs='*', help='replace source debug filename prefix for source map', default=[])", "1]) pos = pos + 1 shift += 7 return", "import sys sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from tools.shared import asstr logger =", "not found: ' + options.dwarfdump) sys.exit(1) process = Popen([options.dwarfdump, \"-debug-info\",", "related sections stripped = stripped + wasm[section_start:pos] return stripped def", "prologue_end # 0x000000000000000f 23 3 1 0 0 # 0x0000000000000010", "function to the last END operator entry['address'] -= 1 if", "nargs='?', help='specifies sourceMappingURL section contest') parser.add_argument('--dwarfdump', help=\"path to llvm-dwarfdump executable\")", "# name: \"playground.c\" # dir_index: 1 # mod_time: 0x00000000 #", "entry: entry['address']) def build_sourcemap(entries, code_section_offset, prefixes, collect_sources): sources = []", "result.append(n) return bytes(result) def append_source_mapping(wasm, url): logger.debug('Append sourceMappingURL section') section_name", "0x0000000000000006 22 0 1 0 0 is_stmt # 0x0000000000000007 23", "found in the LICENSE file. 
\"\"\"Utility tools that extracts DWARF", "len(wasm): section_id, pos_ = read_var_uint(wasm, pos) section_size, pos = read_var_uint(wasm,", "28 0 1 0 0 is_stmt include_directories = {'0': comp_dir}", "if collect_sources else None mappings = [] sources_map = {}", "address = entry['address'] + code_section_offset file_name = entry['file'] source_name =", "= infile.read() entries = read_dwarf_entries(wasm_input, options) code_section_offset = get_code_section_offset(wasm) prefixes", "if exit_code != 0: logger.error('Error during llvm-dwarfdump execution (%s)' %", "source_id - last_source_id line_delta = line - last_line column_delta =", "system (see also --sources)', default=[]) parser.add_argument('-w', nargs='?', help='set output wasm", "name_len name = wasm[name_pos:name_end] if name == \"linking\" or name", "sys.exit(1) entries = [] debug_line_chunks = re.split(r\"debug_line\\[(0x[0-9a-f]*)\\]\", asstr(output)) maybe_debug_info_content =", "for p in self.prefixes: if name.startswith(p['prefix']): if p['replacement'] is None:", "= process.communicate() exit_code = process.wait() if exit_code != 0: logger.error('Error", "section_id == 10: return pos pos = pos + section_size", "with open(wasm_input, 'rb') as infile: wasm = infile.read() entries =", "entry['eos']: entries.append(entry) else: # move end of function to the", "index') pos = 8 while pos < len(wasm): section_id, pos_", "ord(wasm[pos:pos + 1]) pos = pos + 1 shift +=", "sources, change files prefixes, and strip debug sections from a", "end of function to the last END operator entry['address'] -=", "for paths that used to load source text SourceMapPrefixes =", "with open(options.w, 'wb') as outfile: outfile.write(wasm) logger.debug('Done') return 0 if", "if '=' in p: prefix, replacement = p.split('=') prefixes.append({'prefix': prefix,", "is for names that output to source maps JSON #", "or name.startswith(\"reloc..debug_\") or name.startswith(\".debug_\"): continue # skip debug related sections", "licenses, the MIT license and the # University of Illinois/NCSA", "address last_source_id = source_id last_line = line last_column = column", "1 block_start = cur_entry def read_dwarf_entries(wasm, options): if options.dwarfdump_output: output", "range(1, len(debug_line_chunks), 2): stmt_list = debug_line_chunks[i] comp_dir_match = re.search(r\"DW_AT_stmt_list\\s+\\(\" +", "'r') as infile: source_content = infile.read() sources_content.append(source_content) except IOError: print('Failed", "for names that output to source maps JSON # -", "1 while b >= 128: n = n | ((b", "os import re from subprocess import Popen, PIPE import sys", "from tools.shared import asstr logger = logging.getLogger('wasm-sourcemap') def parse_args(): parser", "0 while cur_entry < len(entries): if not entries[cur_entry]['eos']: cur_entry +=", "parser.add_argument('-p', '--prefix', nargs='*', help='replace source debug filename prefix for source", "process = Popen([options.dwarfdump, \"-debug-info\", \"-debug-line\", wasm], stdout=PIPE) output, err =", "has the same address, reusing entries[-1]['eos'] = True else: entries.append(entry)", "pos pos = pos + section_size def remove_dead_entries(entries): # Remove", "None: result = name[len(p['prefix'])::] else: result = p['replacement'] + name[len(p['prefix'])::]", "+ section_name + encode_uint_var(len(url)) + url return wasm + encode_uint_var(0)", "by the address field return sorted(entries, key=lambda entry: entry['address']) def", "source_name = prefixes.sources.resolve(file_name) if source_name not in sources_map: 
source_id =", "debug_line_chunks[i + 1] # include_directories[ 1] = \"/Users/yury/Work/junk/sqlite-playground/src\" # file_names[", "[]), ('sources', sources), ('sourcesContent', sources_content), ('mappings', ','.join(mappings))]) def main(): options", "= result + VLQ_CHARS[32 + (x & 31)] x =", "License. Both these licenses can be # found in the", "+ section_size if section_id == 0: name_len, name_pos = read_var_uint(wasm,", "stmt_list = debug_line_chunks[i] comp_dir_match = re.search(r\"DW_AT_stmt_list\\s+\\(\" + stmt_list + r\"\\)\\s+\"", "section_size, section_body = read_var_uint(wasm, pos_) pos = section_body + section_size", "Emscripten is available under two separate licenses, the MIT license", "= entry['address'] + code_section_offset file_name = entry['file'] source_name = prefixes.sources.resolve(file_name)", "pos): n = 0 shift = 0 b = ord(wasm[pos:pos", "help='replace source debug filename prefix for source map', default=[]) parser.add_argument('-s',", "PIPE import sys sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from tools.shared import asstr logger", "url): logger.debug('Append sourceMappingURL section') section_name = \"sourceMappingURL\" section_content = encode_uint_var(len(section_name))", "name): if name in self.cache: return self.cache[name] result = name", "sources_map: source_id = len(sources) sources_map[source_name] = source_id sources.append(source_name) if collect_sources:", "source debug filename prefix for reading sources from file system", "= strip_debug_sections(wasm) if options.source_map_url: wasm = append_source_mapping(wasm, options.source_map_url) if options.w:", "into source map') parser.add_argument('-l', '--load-prefix', nargs='*', help='replace source debug filename", "'--strip', action='store_true', help='removes debug and linking sections') parser.add_argument('-u', '--source-map-url', nargs='?',", "(\\d+)\", line_chunk): dir = include_directories[file.group(3)] file_path = (dir + '/'", "line_chunk): dir = include_directories[file.group(3)] file_path = (dir + '/' if", "JSON # - \"load\" is for paths that used to", "read_dwarf_entries(wasm, options): if options.dwarfdump_output: output = open(options.dwarfdump_output, 'r').read() elif options.dwarfdump:", "move end of function to the last END operator entry['address']", "[] sources_map = {} last_address = 0 last_source_id = 0", "b = ord(wasm[pos:pos + 1]) pos = pos + 1", "1]: # name: \"playground.c\" # dir_index: 1 # mod_time: 0x00000000", "help='replace source debug filename prefix for reading sources from file", "entries.append(entry) remove_dead_entries(entries) # return entries sorted by the address field", "b >= 128: n = n | ((b - 128)", "+ section_size def remove_dead_entries(entries): # Remove entries for dead functions.", "in range(1, len(debug_line_chunks), 2): stmt_list = debug_line_chunks[i] comp_dir_match = re.search(r\"DW_AT_stmt_list\\s+\\(\"", "= 1 last_column = 1 for entry in entries: line", "[] debug_line_chunks = re.split(r\"debug_line\\[(0x[0-9a-f]*)\\]\", asstr(output)) maybe_debug_info_content = debug_line_chunks[0] for i", "options.strip: wasm = strip_debug_sections(wasm) if options.source_map_url: wasm = append_source_mapping(wasm, options.source_map_url)", "during llvm-dwarfdump execution (%s)' % exit_code) sys.exit(1) else: logger.error('Please specify", "0 if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG if os.environ.get('EMCC_DEBUG') else logging.INFO)", "{} def resolve(self, name): if name in self.cache: return 
self.cache[name]", "result = p['replacement'] + name[len(p['prefix'])::] break self.cache[name] = result return", "starting address near to 0 (is equal to its size", "name: \"playground.c\" # dir_index: 1 # mod_time: 0x00000000 # length:", "== 0: continue # start at least at column 1", "sections from a wasm file. \"\"\" import argparse from collections", "encoded in a wasm output produced by the LLVM tools,", "<< 1) if n >= 0 else ((-n << 1)", "prefix, replacement = p.split('=') prefixes.append({'prefix': prefix, 'replacement': replacement}) else: prefixes.append({'prefix':", "'r').read() elif options.dwarfdump: logger.debug('Reading DWARF information from %s' % wasm)", "for entry in entries: line = entry['line'] column = entry['column']", "map = build_sourcemap(entries, code_section_offset, prefixes, options.sources) with open(options.output, 'w') as", "execution (%s)' % exit_code) sys.exit(1) else: logger.error('Please specify either --dwarfdump", "dir.group(2) files = {} for file in re.finditer(r\"file_names\\[\\s*(\\d+)\\]:\\s+name: \\\"([^\\\"]*)\\\"\\s+dir_index: (\\d+)\",", "help=\"path to llvm-dwarfdump executable\") parser.add_argument('--dwarfdump-output', nargs='?', help=argparse.SUPPRESS) return parser.parse_args() class", "\"load\" is for paths that used to load source text", "return parser.parse_args() class Prefixes: def __init__(self, args): prefixes = []", "file.group(2) files[file.group(1)] = file_path for line in re.finditer(r\"\\n0x([0-9a-f]+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)(.*?end_sequence)?\", line_chunk): entry", "sourceMappingURL section') section_name = \"sourceMappingURL\" section_content = encode_uint_var(len(section_name)) + section_name", "tools.shared import asstr logger = logging.getLogger('wasm-sourcemap') def parse_args(): parser =", "if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG if os.environ.get('EMCC_DEBUG') else logging.INFO) sys.exit(main())", "(n << 1) if n >= 0 else ((-n <<", "+= 1 block_start = cur_entry def read_dwarf_entries(wasm, options): if options.dwarfdump_output:", "> 31: result = result + VLQ_CHARS[32 + (x &", "+ encode_uint_var(0) + encode_uint_var(len(section_content)) + section_content def get_code_section_offset(wasm): logger.debug('Read sections", "0x00000000 # # Address Line Column File ISA Discriminator Flags", "IOError: print('Failed to read source: %s' % load_name) sources_content.append(None) else:", "if the # function starting address near to 0 (is", "line 0 if line == 0: continue # start at", "separators=(',', ':')) if options.strip: wasm = strip_debug_sections(wasm) if options.source_map_url: wasm", "paths that used to load source text SourceMapPrefixes = namedtuple('SourceMapPrefixes',", "action='store_true', help='removes debug and linking sections') parser.add_argument('-u', '--source-map-url', nargs='?', help='specifies", "- last_address source_id_delta = source_id - last_source_id line_delta = line", "'w') as outfile: json.dump(map, outfile, separators=(',', ':')) if options.strip: wasm", "functions. 
It is a heuristics to ignore data if the", "parser.add_argument('wasm', help='wasm file') parser.add_argument('-o', '--output', help='output source map') parser.add_argument('-p', '--prefix',", "'line': int(line.group(2)), 'column': int(line.group(3)), 'file': files[line.group(4)], 'eos': line.group(5) is not", "source files from file system into source map') parser.add_argument('-l', '--load-prefix',", "parser.parse_args() class Prefixes: def __init__(self, args): prefixes = [] for", "5 return result + VLQ_CHARS[x] def read_var_uint(wasm, pos): n =", "= ord(wasm[pos:pos + 1]) pos = pos + 1 while", "= entries[block_start]['address'] # Calculate the LEB encoded function size (including", "\"\" line_chunk = debug_line_chunks[i + 1] # include_directories[ 1] =", "+ section_content def get_code_section_offset(wasm): logger.debug('Read sections index') pos = 8", "sources.append(source_name) if collect_sources: load_name = prefixes.load.resolve(file_name) try: with open(load_name, 'r')", "options.w: logger.debug('Saving wasm to %s' % options.w) with open(options.w, 'wb')", "Emscripten Authors. All rights reserved. # Emscripten is available under", "read_var_uint(wasm, pos_) if section_id == 10: return pos pos =", "continue cur_entry += 1 block_start = cur_entry def read_dwarf_entries(wasm, options):", "section_body + section_size if section_id == 0: name_len, name_pos =", "read_var_uint(wasm, pos) section_size, section_body = read_var_uint(wasm, pos_) pos = section_body", "re.split(r\"debug_line\\[(0x[0-9a-f]*)\\]\", asstr(output)) maybe_debug_info_content = debug_line_chunks[0] for i in range(1, len(debug_line_chunks),", "open(wasm_input, 'rb') as infile: wasm = infile.read() entries = read_dwarf_entries(wasm_input,", "column 1 if column == 0: column = 1 address", "it can collect original sources, change files prefixes, and strip", "result = result + VLQ_CHARS[32 + (x & 31)] x", "+ url return wasm + encode_uint_var(0) + encode_uint_var(len(section_content)) + section_content", "as outfile: json.dump(map, outfile, separators=(',', ':')) if options.strip: wasm =", "replacement}) else: prefixes.append({'prefix': p, 'replacement': None}) self.prefixes = prefixes self.cache", "dir = include_directories[file.group(3)] file_path = (dir + '/' if file.group(2)[0]", "read_var_uint(wasm, pos) section_size, pos = read_var_uint(wasm, pos_) if section_id ==", "field return sorted(entries, key=lambda entry: entry['address']) def build_sourcemap(entries, code_section_offset, prefixes,", "files = {} for file in re.finditer(r\"file_names\\[\\s*(\\d+)\\]:\\s+name: \\\"([^\\\"]*)\\\"\\s+dir_index: (\\d+)\", line_chunk):", "= section_body + section_size if section_id == 0: name_len, name_pos", "open(options.output, 'w') as outfile: json.dump(map, outfile, separators=(',', ':')) if options.strip:", "Popen([options.dwarfdump, \"-debug-info\", \"-debug-line\", wasm], stdout=PIPE) output, err = process.communicate() exit_code", "collect_sources): sources = [] sources_content = [] if collect_sources else", "entries if fn_start < min_live_offset: # Remove dead code debug", "min_live_offset: # Remove dead code debug info block. 
del entries[block_start:cur_entry", "for source map', default=[]) parser.add_argument('-s', '--sources', action='store_true', help='read and embed", "\"linking\" or name == \"sourceMappingURL\" or name.startswith(\"reloc..debug_\") or name.startswith(\".debug_\"): continue", "last_line column_delta = column - last_column mappings.append(encode_vlq(address_delta) + encode_vlq(source_id_delta) +", "= prefixes self.cache = {} def resolve(self, name): if name", "source map') parser.add_argument('-p', '--prefix', nargs='*', help='replace source debug filename prefix", "p.split('=') prefixes.append({'prefix': prefix, 'replacement': replacement}) else: prefixes.append({'prefix': p, 'replacement': None})", "23 3 1 0 0 end_sequence # 0x0000000000000011 28 0", "[] if collect_sources else None mappings = [] sources_map =", "# 0x0000000000000011 28 0 1 0 0 is_stmt include_directories =", "strip_debug_sections(wasm): logger.debug('Strip debug sections') pos = 8 stripped = wasm[:pos]", "\\\"([^\\\"]*)\", line_chunk): include_directories[dir.group(1)] = dir.group(2) files = {} for file", "parser.add_argument('--dwarfdump', help=\"path to llvm-dwarfdump executable\") parser.add_argument('--dwarfdump-output', nargs='?', help=argparse.SUPPRESS) return parser.parse_args()", "p['replacement'] + name[len(p['prefix'])::] break self.cache[name] = result return result #", "replacement = p.split('=') prefixes.append({'prefix': prefix, 'replacement': replacement}) else: prefixes.append({'prefix': p,", "open(options.w, 'wb') as outfile: outfile.write(wasm) logger.debug('Done') return 0 if __name__", "address, reusing entries[-1]['eos'] = True else: entries.append(entry) remove_dead_entries(entries) # return", "1) + 1) result = \"\" while x > 31:", "'wb') as outfile: outfile.write(wasm) logger.debug('Done') return 0 if __name__ ==", "shift = 0 b = ord(wasm[pos:pos + 1]) pos =", "= argparse.ArgumentParser(prog='wasm-sourcemap.py', description=__doc__) parser.add_argument('wasm', help='wasm file') parser.add_argument('-o', '--output', help='output source", "floor, log import os import re from subprocess import Popen,", "pos < len(wasm): section_start = pos section_id, pos_ = read_var_uint(wasm,", "= cur_entry def read_dwarf_entries(wasm, options): if options.dwarfdump_output: output = open(options.dwarfdump_output,", "Popen, PIPE import sys sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from tools.shared import asstr", "(b << shift), pos def strip_debug_sections(wasm): logger.debug('Strip debug sections') pos", "prefixes.append({'prefix': prefix, 'replacement': replacement}) else: prefixes.append({'prefix': p, 'replacement': None}) self.prefixes", "(including size field) fn_size_length = floor(log(entries[cur_entry]['address'] - fn_start + 1,", "by the LLVM tools, and encodes it as a wasm", "start at least at column 1 if column == 0:", "json import logging from math import floor, log import os", "shift) b = ord(wasm[pos:pos + 1]) pos = pos +", "to 0 (is equal to its size field length). 
block_start", "Column File ISA Discriminator Flags # ------------------ ------ ------ ------", "len(wasm): section_start = pos section_id, pos_ = read_var_uint(wasm, pos) section_size,", "# 0x0000000000000006 22 0 1 0 0 is_stmt # 0x0000000000000007", "Both these licenses can be # found in the LICENSE", "logger.error('Error during llvm-dwarfdump execution (%s)' % exit_code) sys.exit(1) else: logger.error('Please", "section_start = pos section_id, pos_ = read_var_uint(wasm, pos) section_size, section_body", "0 shift = 0 b = ord(wasm[pos:pos + 1]) pos", "= [] if collect_sources else None mappings = [] sources_map", "class Prefixes: def __init__(self, args): prefixes = [] for p", "block. del entries[block_start:cur_entry + 1] cur_entry = block_start continue cur_entry", "name_pos = read_var_uint(wasm, section_body) name_end = name_pos + name_len name", "((-n << 1) + 1) result = \"\" while x", "def strip_debug_sections(wasm): logger.debug('Strip debug sections') pos = 8 stripped =", "can collect original sources, change files prefixes, and strip debug", "if file.group(2)[0] != '/' else '') + file.group(2) files[file.group(1)] =", "column = entry['column'] # ignore entries with line 0 if", "encode_vlq(column_delta)) last_address = address last_source_id = source_id last_line = line", "0 (is equal to its size field length). block_start =", "1 last_column = 1 for entry in entries: line =", "heuristics to ignore data if the # function starting address", "column_delta = column - last_column mappings.append(encode_vlq(address_delta) + encode_vlq(source_id_delta) + encode_vlq(line_delta)", "file') parser.add_argument('-x', '--strip', action='store_true', help='removes debug and linking sections') parser.add_argument('-u',", "maps JSON # - \"load\" is for paths that used", "+ '/' if file.group(2)[0] != '/' else '') + file.group(2)", "- last_source_id line_delta = line - last_line column_delta = column", "for line in re.finditer(r\"\\n0x([0-9a-f]+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)(.*?end_sequence)?\", line_chunk): entry = {'address': int(line.group(1), 16),", "= read_var_uint(wasm, pos_) pos = section_body + section_size if section_id", "last_column mappings.append(encode_vlq(address_delta) + encode_vlq(source_id_delta) + encode_vlq(line_delta) + encode_vlq(column_delta)) last_address =", "% options.w) with open(options.w, 'wb') as outfile: outfile.write(wasm) logger.debug('Done') return", "0 0 # 0x0000000000000010 23 3 1 0 0 end_sequence", "else: source_id = sources_map[source_name] address_delta = address - last_address source_id_delta", "line last_column = column return OrderedDict([('version', 3), ('names', []), ('sources',", "= pos section_id, pos_ = read_var_uint(wasm, pos) section_size, section_body =", "2018 The Emscripten Authors. All rights reserved. 
# Emscripten is", "while n > 127: result.append(128 | (n & 127)) n", "\"sources\" is for names that output to source maps JSON", "for code section entries if fn_start < min_live_offset: # Remove", "logger.debug('Done') return 0 if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG if os.environ.get('EMCC_DEBUG')", "debug related sections stripped = stripped + wasm[section_start:pos] return stripped", "with open(options.output, 'w') as outfile: json.dump(map, outfile, separators=(',', ':')) if", "sys sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from tools.shared import asstr logger = logging.getLogger('wasm-sourcemap')", "entries.append(entry) else: # move end of function to the last", "sys.exit(1) process = Popen([options.dwarfdump, \"-debug-info\", \"-debug-line\", wasm], stdout=PIPE) output, err", "data if the # function starting address near to 0", "mappings = [] sources_map = {} last_address = 0 last_source_id", "append_source_mapping(wasm, url): logger.debug('Append sourceMappingURL section') section_name = \"sourceMappingURL\" section_content =", "self.cache: return self.cache[name] result = name for p in self.prefixes:", "1 shift += 7 return n + (b << shift),", "return result # SourceMapPrefixes contains resolver for file names that", "produced by the LLVM tools, and encodes it as a", "field) fn_size_length = floor(log(entries[cur_entry]['address'] - fn_start + 1, 128)) +", "line.group(5) is not None} if not entry['eos']: entries.append(entry) else: #", "# include_directories[ 1] = \"/Users/yury/Work/junk/sqlite-playground/src\" # file_names[ 1]: # name:", "pos < len(wasm): section_id, pos_ = read_var_uint(wasm, pos) section_size, pos", "n + (b << shift), pos def strip_debug_sections(wasm): logger.debug('Strip debug", "result + VLQ_CHARS[x] def read_var_uint(wasm, pos): n = 0 shift", "argparse.ArgumentParser(prog='wasm-sourcemap.py', description=__doc__) parser.add_argument('wasm', help='wasm file') parser.add_argument('-o', '--output', help='output source map')", "file_name = entry['file'] source_name = prefixes.sources.resolve(file_name) if source_name not in", "options.dwarfdump) sys.exit(1) process = Popen([options.dwarfdump, \"-debug-info\", \"-debug-line\", wasm], stdout=PIPE) output,", "<< 1) + 1) result = \"\" while x >", "= stripped + wasm[section_start:pos] return stripped def encode_uint_var(n): result =", "+ encode_uint_var(len(section_content)) + section_content def get_code_section_offset(wasm): logger.debug('Read sections index') pos", "is not None else \"\" line_chunk = debug_line_chunks[i + 1]", "0 b = ord(wasm[pos:pos + 1]) pos = pos +", "end_sequence # 0x0000000000000011 28 0 1 0 0 is_stmt include_directories", "in p: prefix, replacement = p.split('=') prefixes.append({'prefix': prefix, 'replacement': replacement})", "self.cache[name] = result return result # SourceMapPrefixes contains resolver for", "source: %s' % load_name) sources_content.append(None) else: source_id = sources_map[source_name] address_delta", "23 3 1 0 0 # 0x0000000000000010 23 3 1", "{'0': comp_dir} for dir in re.finditer(r\"include_directories\\[\\s*(\\d+)\\] = \\\"([^\\\"]*)\", line_chunk): include_directories[dir.group(1)]", "= address - last_address source_id_delta = source_id - last_source_id line_delta", "= read_var_uint(wasm, pos_) if section_id == 10: return pos pos", "n >= 0 else ((-n << 1) + 1) result", "127)) n = n >> 7 result.append(n) return bytes(result) def", "args: if '=' in p: prefix, replacement = p.split('=') 
prefixes.append({'prefix':", "to its size field length). block_start = 0 cur_entry =", "if line == 0: continue # start at least at", "near to 0 (is equal to its size field length).", "except IOError: print('Failed to read source: %s' % load_name) sources_content.append(None)", "name[len(p['prefix'])::] else: result = p['replacement'] + name[len(p['prefix'])::] break self.cache[name] =", "+ 1, 128)) + 1 min_live_offset = 1 + fn_size_length", "under two separate licenses, the MIT license and the #", "= block_start continue cur_entry += 1 block_start = cur_entry def", "entry['file'] source_name = prefixes.sources.resolve(file_name) if source_name not in sources_map: source_id", "prefixes.sources.resolve(file_name) if source_name not in sources_map: source_id = len(sources) sources_map[source_name]", "0 0 is_stmt prologue_end # 0x000000000000000f 23 3 1 0", "if fn_start < min_live_offset: # Remove dead code debug info", "- \"load\" is for paths that used to load source", "bytes(result) def append_source_mapping(wasm, url): logger.debug('Append sourceMappingURL section') section_name = \"sourceMappingURL\"", "elif options.dwarfdump: logger.debug('Reading DWARF information from %s' % wasm) if", "All rights reserved. # Emscripten is available under two separate", "# return entries sorted by the address field return sorted(entries,", "load source text SourceMapPrefixes = namedtuple('SourceMapPrefixes', 'sources, load') def encode_vlq(n):", "floor(log(entries[cur_entry]['address'] - fn_start + 1, 128)) + 1 min_live_offset =", "specify either --dwarfdump or --dwarfdump-output') sys.exit(1) entries = [] debug_line_chunks", "not in sources_map: source_id = len(sources) sources_map[source_name] = source_id sources.append(source_name)", "pos = pos + section_size def remove_dead_entries(entries): # Remove entries", "= build_sourcemap(entries, code_section_offset, prefixes, options.sources) with open(options.output, 'w') as outfile:", "= True else: entries.append(entry) remove_dead_entries(entries) # return entries sorted by", "remove_dead_entries(entries) # return entries sorted by the address field return", "= name[len(p['prefix'])::] else: result = p['replacement'] + name[len(p['prefix'])::] break self.cache[name]", "stripped = stripped + wasm[section_start:pos] return stripped def encode_uint_var(n): result", "rights reserved. 
# Emscripten is available under two separate licenses,", "action='store_true', help='read and embed source files from file system into", "1 continue fn_start = entries[block_start]['address'] # Calculate the LEB encoded", "either --dwarfdump or --dwarfdump-output') sys.exit(1) entries = [] debug_line_chunks =", "# Address Line Column File ISA Discriminator Flags # ------------------", "File ISA Discriminator Flags # ------------------ ------ ------ ------ ---", "------ --- ------------- ------------- # 0x0000000000000006 22 0 1 0", "x = x >> 5 return result + VLQ_CHARS[x] def", "None mappings = [] sources_map = {} last_address = 0", "change files prefixes, and strip debug sections from a wasm", "continue # start at least at column 1 if column", "r\"\\)\\s+\" + r\"DW_AT_comp_dir\\s+\\(\\\"([^\\\"]+)\", maybe_debug_info_content) comp_dir = comp_dir_match.group(1) if comp_dir_match is", "load_name = prefixes.load.resolve(file_name) try: with open(load_name, 'r') as infile: source_content", "sources_map[source_name] = source_id sources.append(source_name) if collect_sources: load_name = prefixes.load.resolve(file_name) try:", "(x & 31)] x = x >> 5 return result", "+ name_len name = wasm[name_pos:name_end] if name == \"linking\" or", "else: prefixes.append({'prefix': p, 'replacement': None}) self.prefixes = prefixes self.cache =", "0x0000000000000007 23 10 1 0 0 is_stmt prologue_end # 0x000000000000000f", "Open Source License. Both these licenses can be # found", "x >> 5 return result + VLQ_CHARS[x] def read_var_uint(wasm, pos):", "name[len(p['prefix'])::] break self.cache[name] = result return result # SourceMapPrefixes contains", "self.prefixes = prefixes self.cache = {} def resolve(self, name): if", "system into source map') parser.add_argument('-l', '--load-prefix', nargs='*', help='replace source debug", "wasm = infile.read() entries = read_dwarf_entries(wasm_input, options) code_section_offset = get_code_section_offset(wasm)", "pos_ = read_var_uint(wasm, pos) section_size, section_body = read_var_uint(wasm, pos_) pos", "0: column = 1 address = entry['address'] + code_section_offset file_name", "1 if entries[-1]['address'] == entry['address']: # last entry has the", "\"\" while x > 31: result = result + VLQ_CHARS[32", "dir in re.finditer(r\"include_directories\\[\\s*(\\d+)\\] = \\\"([^\\\"]*)\", line_chunk): include_directories[dir.group(1)] = dir.group(2) files", "in re.finditer(r\"file_names\\[\\s*(\\d+)\\]:\\s+name: \\\"([^\\\"]*)\\\"\\s+dir_index: (\\d+)\", line_chunk): dir = include_directories[file.group(3)] file_path =", "int(line.group(2)), 'column': int(line.group(3)), 'file': files[line.group(4)], 'eos': line.group(5) is not None}", "r\"DW_AT_comp_dir\\s+\\(\\\"([^\\\"]+)\", maybe_debug_info_content) comp_dir = comp_dir_match.group(1) if comp_dir_match is not None", "1 # mod_time: 0x00000000 # length: 0x00000000 # # Address", "prefixes, options.sources) with open(options.output, 'w') as outfile: json.dump(map, outfile, separators=(',',", "def __init__(self, args): prefixes = [] for p in args:", "while b >= 128: n = n | ((b -", "if comp_dir_match is not None else \"\" line_chunk = debug_line_chunks[i", "= 0 last_line = 1 last_column = 1 for entry", "parser.add_argument('-w', nargs='?', help='set output wasm file') parser.add_argument('-x', '--strip', action='store_true', help='removes", "pos) section_size, pos = read_var_uint(wasm, pos_) if section_id == 10:", "SourceMapPrefixes(sources=Prefixes(options.prefix), load=Prefixes(options.load_prefix)) logger.debug('Saving to %s' % 
options.output) map = build_sourcemap(entries,", "return self.cache[name] result = name for p in self.prefixes: if", "last_column = column return OrderedDict([('version', 3), ('names', []), ('sources', sources),", "options) code_section_offset = get_code_section_offset(wasm) prefixes = SourceMapPrefixes(sources=Prefixes(options.prefix), load=Prefixes(options.load_prefix)) logger.debug('Saving to", "+ VLQ_CHARS[x] def read_var_uint(wasm, pos): n = 0 shift =", "return stripped def encode_uint_var(n): result = bytearray() while n >", "'replacement': replacement}) else: prefixes.append({'prefix': p, 'replacement': None}) self.prefixes = prefixes", "= result return result # SourceMapPrefixes contains resolver for file", "VLQ_CHARS[32 + (x & 31)] x = x >> 5", "entries[cur_entry]['eos']: cur_entry += 1 continue fn_start = entries[block_start]['address'] # Calculate", "len(entries): if not entries[cur_entry]['eos']: cur_entry += 1 continue fn_start =", "wasm], stdout=PIPE) output, err = process.communicate() exit_code = process.wait() if", "exit_code) sys.exit(1) else: logger.error('Please specify either --dwarfdump or --dwarfdump-output') sys.exit(1)", "wasm file') parser.add_argument('-x', '--strip', action='store_true', help='removes debug and linking sections')", "the address field return sorted(entries, key=lambda entry: entry['address']) def build_sourcemap(entries,", "= address last_source_id = source_id last_line = line last_column =", "output to source maps JSON # - \"load\" is for", "and strip debug sections from a wasm file. \"\"\" import", "last_address = address last_source_id = source_id last_line = line last_column", "= get_code_section_offset(wasm) prefixes = SourceMapPrefixes(sources=Prefixes(options.prefix), load=Prefixes(options.load_prefix)) logger.debug('Saving to %s' %", "os.path.exists(options.dwarfdump): logger.error('llvm-dwarfdump not found: ' + options.dwarfdump) sys.exit(1) process =", "length). 
block_start = 0 cur_entry = 0 while cur_entry <", "executable\") parser.add_argument('--dwarfdump-output', nargs='?', help=argparse.SUPPRESS) return parser.parse_args() class Prefixes: def __init__(self,", "10: return pos pos = pos + section_size def remove_dead_entries(entries):", "that are: # - \"sources\" is for names that output", "SourceMapPrefixes = namedtuple('SourceMapPrefixes', 'sources, load') def encode_vlq(n): VLQ_CHARS = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\"", "line == 0: continue # start at least at column", "+ VLQ_CHARS[32 + (x & 31)] x = x >>", "continue fn_start = entries[block_start]['address'] # Calculate the LEB encoded function", "< len(wasm): section_start = pos section_id, pos_ = read_var_uint(wasm, pos)", "if options.dwarfdump_output: output = open(options.dwarfdump_output, 'r').read() elif options.dwarfdump: logger.debug('Reading DWARF", "import floor, log import os import re from subprocess import", "0: logger.error('Error during llvm-dwarfdump execution (%s)' % exit_code) sys.exit(1) else:", "3 1 0 0 # 0x0000000000000010 23 3 1 0", "name = wasm[name_pos:name_end] if name == \"linking\" or name ==", "result + VLQ_CHARS[32 + (x & 31)] x = x", "= read_var_uint(wasm, pos) section_size, section_body = read_var_uint(wasm, pos_) pos =", "llvm-dwarfdump execution (%s)' % exit_code) sys.exit(1) else: logger.error('Please specify either", "DWARF information encoded in a wasm output produced by the", "for i in range(1, len(debug_line_chunks), 2): stmt_list = debug_line_chunks[i] comp_dir_match", "pos = 8 while pos < len(wasm): section_id, pos_ =", "'/' if file.group(2)[0] != '/' else '') + file.group(2) files[file.group(1)]", "('sourcesContent', sources_content), ('mappings', ','.join(mappings))]) def main(): options = parse_args() wasm_input", "= {} for file in re.finditer(r\"file_names\\[\\s*(\\d+)\\]:\\s+name: \\\"([^\\\"]*)\\\"\\s+dir_index: (\\d+)\", line_chunk): dir", "def parse_args(): parser = argparse.ArgumentParser(prog='wasm-sourcemap.py', description=__doc__) parser.add_argument('wasm', help='wasm file') parser.add_argument('-o',", "def resolve(self, name): if name in self.cache: return self.cache[name] result", "for file names that are: # - \"sources\" is for", "while pos < len(wasm): section_start = pos section_id, pos_ =", "= [] debug_line_chunks = re.split(r\"debug_line\\[(0x[0-9a-f]*)\\]\", asstr(output)) maybe_debug_info_content = debug_line_chunks[0] for", "parser.add_argument('-u', '--source-map-url', nargs='?', help='specifies sourceMappingURL section contest') parser.add_argument('--dwarfdump', help=\"path to", "prefixes = [] for p in args: if '=' in", "= encode_uint_var(len(section_name)) + section_name + encode_uint_var(len(url)) + url return wasm", "Remove entries for dead functions. 
It is a heuristics to", "# ------------------ ------ ------ ------ --- ------------- ------------- # 0x0000000000000006", "section_id, pos_ = read_var_uint(wasm, pos) section_size, pos = read_var_uint(wasm, pos_)", "\"-debug-info\", \"-debug-line\", wasm], stdout=PIPE) output, err = process.communicate() exit_code =", "import OrderedDict, namedtuple import json import logging from math import", "1 0 0 is_stmt # 0x0000000000000007 23 10 1 0", "re.finditer(r\"\\n0x([0-9a-f]+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)(.*?end_sequence)?\", line_chunk): entry = {'address': int(line.group(1), 16), 'line': int(line.group(2)), 'column':", "from %s' % wasm) if not os.path.exists(options.dwarfdump): logger.error('llvm-dwarfdump not found:", "'--source-map-url', nargs='?', help='specifies sourceMappingURL section contest') parser.add_argument('--dwarfdump', help=\"path to llvm-dwarfdump", "encode_uint_var(len(url)) + url return wasm + encode_uint_var(0) + encode_uint_var(len(section_content)) +", "else: logger.error('Please specify either --dwarfdump or --dwarfdump-output') sys.exit(1) entries =", "stripped def encode_uint_var(n): result = bytearray() while n > 127:", "= pos + section_size def remove_dead_entries(entries): # Remove entries for", "is_stmt # 0x0000000000000007 23 10 1 0 0 is_stmt prologue_end", "a wasm file. \"\"\" import argparse from collections import OrderedDict,", "8 stripped = wasm[:pos] while pos < len(wasm): section_start =", "Additionally, it can collect original sources, change files prefixes, and", "logging from math import floor, log import os import re", "= logging.getLogger('wasm-sourcemap') def parse_args(): parser = argparse.ArgumentParser(prog='wasm-sourcemap.py', description=__doc__) parser.add_argument('wasm', help='wasm", "collect_sources else None mappings = [] sources_map = {} last_address", "build_sourcemap(entries, code_section_offset, prefixes, options.sources) with open(options.output, 'w') as outfile: json.dump(map,", "1) if n >= 0 else ((-n << 1) +", "files[file.group(1)] = file_path for line in re.finditer(r\"\\n0x([0-9a-f]+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)(.*?end_sequence)?\", line_chunk): entry =", "file names that are: # - \"sources\" is for names", "math import floor, log import os import re from subprocess", "args): prefixes = [] for p in args: if '='", "= options.wasm with open(wasm_input, 'rb') as infile: wasm = infile.read()", "parser.add_argument('-x', '--strip', action='store_true', help='removes debug and linking sections') parser.add_argument('-u', '--source-map-url',", "help='specifies sourceMappingURL section contest') parser.add_argument('--dwarfdump', help=\"path to llvm-dwarfdump executable\") parser.add_argument('--dwarfdump-output',", "python # Copyright 2018 The Emscripten Authors. 
All rights reserved.", "files from file system into source map') parser.add_argument('-l', '--load-prefix', nargs='*',", "'') + file.group(2) files[file.group(1)] = file_path for line in re.finditer(r\"\\n0x([0-9a-f]+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)(.*?end_sequence)?\",", "comp_dir} for dir in re.finditer(r\"include_directories\\[\\s*(\\d+)\\] = \\\"([^\\\"]*)\", line_chunk): include_directories[dir.group(1)] =", "main(): options = parse_args() wasm_input = options.wasm with open(wasm_input, 'rb')", "files[line.group(4)], 'eos': line.group(5) is not None} if not entry['eos']: entries.append(entry)", "return OrderedDict([('version', 3), ('names', []), ('sources', sources), ('sourcesContent', sources_content), ('mappings',", "= {} last_address = 0 last_source_id = 0 last_line =", "sourceMappingURL section contest') parser.add_argument('--dwarfdump', help=\"path to llvm-dwarfdump executable\") parser.add_argument('--dwarfdump-output', nargs='?',", "int(line.group(1), 16), 'line': int(line.group(2)), 'column': int(line.group(3)), 'file': files[line.group(4)], 'eos': line.group(5)", "n >> 7 result.append(n) return bytes(result) def append_source_mapping(wasm, url): logger.debug('Append", "\"/Users/yury/Work/junk/sqlite-playground/src\" # file_names[ 1]: # name: \"playground.c\" # dir_index: 1", "map. Additionally, it can collect original sources, change files prefixes,", "tools, and encodes it as a wasm source map. Additionally,", "break self.cache[name] = result return result # SourceMapPrefixes contains resolver", "logger.debug('Read sections index') pos = 8 while pos < len(wasm):", "parser.add_argument('--dwarfdump-output', nargs='?', help=argparse.SUPPRESS) return parser.parse_args() class Prefixes: def __init__(self, args):", "load') def encode_vlq(n): VLQ_CHARS = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\" x = (n <<", "# ignore entries with line 0 if line == 0:", "= x >> 5 return result + VLQ_CHARS[x] def read_var_uint(wasm,", "fn_start = entries[block_start]['address'] # Calculate the LEB encoded function size", "prefixes.load.resolve(file_name) try: with open(load_name, 'r') as infile: source_content = infile.read()", "filename prefix for source map', default=[]) parser.add_argument('-s', '--sources', action='store_true', help='read", "= name for p in self.prefixes: if name.startswith(p['prefix']): if p['replacement']", "column = 1 address = entry['address'] + code_section_offset file_name =", ">= 128: n = n | ((b - 128) <<", "Prefixes: def __init__(self, args): prefixes = [] for p in", "debug_line_chunks[0] for i in range(1, len(debug_line_chunks), 2): stmt_list = debug_line_chunks[i]", "result = bytearray() while n > 127: result.append(128 | (n", "Source License. 
Both these licenses can be # found in", "file_names[ 1]: # name: \"playground.c\" # dir_index: 1 # mod_time:", "entries[block_start:cur_entry + 1] cur_entry = block_start continue cur_entry += 1", "else None mappings = [] sources_map = {} last_address =", "= infile.read() sources_content.append(source_content) except IOError: print('Failed to read source: %s'", "not entries[cur_entry]['eos']: cur_entry += 1 continue fn_start = entries[block_start]['address'] #", "output, err = process.communicate() exit_code = process.wait() if exit_code !=", "open(options.dwarfdump_output, 'r').read() elif options.dwarfdump: logger.debug('Reading DWARF information from %s' %", "maybe_debug_info_content) comp_dir = comp_dir_match.group(1) if comp_dir_match is not None else", "in re.finditer(r\"\\n0x([0-9a-f]+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)(.*?end_sequence)?\", line_chunk): entry = {'address': int(line.group(1), 16), 'line': int(line.group(2)),", "prefixes, and strip debug sections from a wasm file. \"\"\"", "include_directories = {'0': comp_dir} for dir in re.finditer(r\"include_directories\\[\\s*(\\d+)\\] = \\\"([^\\\"]*)\",", "+ options.dwarfdump) sys.exit(1) process = Popen([options.dwarfdump, \"-debug-info\", \"-debug-line\", wasm], stdout=PIPE)", "fn_start + 1, 128)) + 1 min_live_offset = 1 +", "logger.debug('Saving to %s' % options.output) map = build_sourcemap(entries, code_section_offset, prefixes,", "field length). block_start = 0 cur_entry = 0 while cur_entry", "10 1 0 0 is_stmt prologue_end # 0x000000000000000f 23 3", "output wasm file') parser.add_argument('-x', '--strip', action='store_true', help='removes debug and linking", "0: continue # start at least at column 1 if", "and the # University of Illinois/NCSA Open Source License. Both", "--sources)', default=[]) parser.add_argument('-w', nargs='?', help='set output wasm file') parser.add_argument('-x', '--strip',", "= file_path for line in re.finditer(r\"\\n0x([0-9a-f]+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)(.*?end_sequence)?\", line_chunk): entry = {'address':", "in a wasm output produced by the LLVM tools, and", "else: # move end of function to the last END", "key=lambda entry: entry['address']) def build_sourcemap(entries, code_section_offset, prefixes, collect_sources): sources =", "read source: %s' % load_name) sources_content.append(None) else: source_id = sources_map[source_name]", "wasm source map. Additionally, it can collect original sources, change", "info block. del entries[block_start:cur_entry + 1] cur_entry = block_start continue", "# 0x000000000000000f 23 3 1 0 0 # 0x0000000000000010 23", "from a wasm file. 
\"\"\" import argparse from collections import", "name == \"sourceMappingURL\" or name.startswith(\"reloc..debug_\") or name.startswith(\".debug_\"): continue # skip", "embed source files from file system into source map') parser.add_argument('-l',", "# move end of function to the last END operator", "len(sources) sources_map[source_name] = source_id sources.append(source_name) if collect_sources: load_name = prefixes.load.resolve(file_name)", "read_var_uint(wasm, section_body) name_end = name_pos + name_len name = wasm[name_pos:name_end]", "output produced by the LLVM tools, and encodes it as", "file_path for line in re.finditer(r\"\\n0x([0-9a-f]+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)(.*?end_sequence)?\", line_chunk): entry = {'address': int(line.group(1),", "code section entries if fn_start < min_live_offset: # Remove dead", "return sorted(entries, key=lambda entry: entry['address']) def build_sourcemap(entries, code_section_offset, prefixes, collect_sources):", "address - last_address source_id_delta = source_id - last_source_id line_delta =", "= include_directories[file.group(3)] file_path = (dir + '/' if file.group(2)[0] !=", "# Remove entries for dead functions. It is a heuristics", "parser.add_argument('-l', '--load-prefix', nargs='*', help='replace source debug filename prefix for reading", "if n >= 0 else ((-n << 1) + 1)", "1 0 0 is_stmt include_directories = {'0': comp_dir} for dir", "% wasm) if not os.path.exists(options.dwarfdump): logger.error('llvm-dwarfdump not found: ' +", "{} for file in re.finditer(r\"file_names\\[\\s*(\\d+)\\]:\\s+name: \\\"([^\\\"]*)\\\"\\s+dir_index: (\\d+)\", line_chunk): dir =", "# Copyright 2018 The Emscripten Authors. All rights reserved. #", "= pos + 1 while b >= 128: n =", "pos + 1 shift += 7 return n + (b", "= source_id last_line = line last_column = column return OrderedDict([('version',", "file_path = (dir + '/' if file.group(2)[0] != '/' else", "from subprocess import Popen, PIPE import sys sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from", "is for code section entries if fn_start < min_live_offset: #", "import os import re from subprocess import Popen, PIPE import", "= dir.group(2) files = {} for file in re.finditer(r\"file_names\\[\\s*(\\d+)\\]:\\s+name: \\\"([^\\\"]*)\\\"\\s+dir_index:", "parser.add_argument('-o', '--output', help='output source map') parser.add_argument('-p', '--prefix', nargs='*', help='replace source", "source_id last_line = line last_column = column return OrderedDict([('version', 3),", "entries[block_start]['address'] # Calculate the LEB encoded function size (including size", "reusing entries[-1]['eos'] = True else: entries.append(entry) remove_dead_entries(entries) # return entries", "from collections import OrderedDict, namedtuple import json import logging from", "0 # 0x0000000000000010 23 3 1 0 0 end_sequence #", "len(debug_line_chunks), 2): stmt_list = debug_line_chunks[i] comp_dir_match = re.search(r\"DW_AT_stmt_list\\s+\\(\" + stmt_list", "\"\"\"Utility tools that extracts DWARF information encoded in a wasm", "load=Prefixes(options.load_prefix)) logger.debug('Saving to %s' % options.output) map = build_sourcemap(entries, code_section_offset,", "------------- ------------- # 0x0000000000000006 22 0 1 0 0 is_stmt", "to source maps JSON # - \"load\" is for paths", "result = \"\" while x > 31: result = result", "= open(options.dwarfdump_output, 'r').read() elif options.dwarfdump: logger.debug('Reading DWARF information from %s'", "not os.path.exists(options.dwarfdump): 
logger.error('llvm-dwarfdump not found: ' + options.dwarfdump) sys.exit(1) process", "wasm = append_source_mapping(wasm, options.source_map_url) if options.w: logger.debug('Saving wasm to %s'", "stmt_list + r\"\\)\\s+\" + r\"DW_AT_comp_dir\\s+\\(\\\"([^\\\"]+)\", maybe_debug_info_content) comp_dir = comp_dir_match.group(1) if", "& 31)] x = x >> 5 return result +", "result.append(128 | (n & 127)) n = n >> 7", "dead functions. It is a heuristics to ignore data if", "sources_content = [] if collect_sources else None mappings = []", "else: result = p['replacement'] + name[len(p['prefix'])::] break self.cache[name] = result", "%s' % options.w) with open(options.w, 'wb') as outfile: outfile.write(wasm) logger.debug('Done')", "pos = pos + 1 while b >= 128: n", "address near to 0 (is equal to its size field", "Address Line Column File ISA Discriminator Flags # ------------------ ------", "pos section_id, pos_ = read_var_uint(wasm, pos) section_size, section_body = read_var_uint(wasm,", "p: prefix, replacement = p.split('=') prefixes.append({'prefix': prefix, 'replacement': replacement}) else:", "sections stripped = stripped + wasm[section_start:pos] return stripped def encode_uint_var(n):", "if not os.path.exists(options.dwarfdump): logger.error('llvm-dwarfdump not found: ' + options.dwarfdump) sys.exit(1)", "22 0 1 0 0 is_stmt # 0x0000000000000007 23 10", "file.group(2)[0] != '/' else '') + file.group(2) files[file.group(1)] = file_path", "ignore data if the # function starting address near to", "= 0 shift = 0 b = ord(wasm[pos:pos + 1])", "name == \"linking\" or name == \"sourceMappingURL\" or name.startswith(\"reloc..debug_\") or", "('names', []), ('sources', sources), ('sourcesContent', sources_content), ('mappings', ','.join(mappings))]) def main():", "files prefixes, and strip debug sections from a wasm file.", "result # SourceMapPrefixes contains resolver for file names that are:", "contains resolver for file names that are: # - \"sources\"", "if section_id == 0: name_len, name_pos = read_var_uint(wasm, section_body) name_end", "information from %s' % wasm) if not os.path.exists(options.dwarfdump): logger.error('llvm-dwarfdump not", "------------------ ------ ------ ------ --- ------------- ------------- # 0x0000000000000006 22", "comp_dir = comp_dir_match.group(1) if comp_dir_match is not None else \"\"", "to %s' % options.output) map = build_sourcemap(entries, code_section_offset, prefixes, options.sources)", "'column': int(line.group(3)), 'file': files[line.group(4)], 'eos': line.group(5) is not None} if", "1 0 0 # 0x0000000000000010 23 3 1 0 0", "prefixes.append({'prefix': p, 'replacement': None}) self.prefixes = prefixes self.cache = {}", "entry = {'address': int(line.group(1), 16), 'line': int(line.group(2)), 'column': int(line.group(3)), 'file':", "= prefixes.load.resolve(file_name) try: with open(load_name, 'r') as infile: source_content =", "maybe_debug_info_content = debug_line_chunks[0] for i in range(1, len(debug_line_chunks), 2): stmt_list", "pos = 8 stripped = wasm[:pos] while pos < len(wasm):", "as outfile: outfile.write(wasm) logger.debug('Done') return 0 if __name__ == '__main__':", "encode_vlq(source_id_delta) + encode_vlq(line_delta) + encode_vlq(column_delta)) last_address = address last_source_id =", "Calculate the LEB encoded function size (including size field) fn_size_length", "return bytes(result) def append_source_mapping(wasm, url): logger.debug('Append sourceMappingURL section') section_name =", "OrderedDict([('version', 3), ('names', []), ('sources', sources), 
('sourcesContent', sources_content), ('mappings', ','.join(mappings))])", "self.prefixes: if name.startswith(p['prefix']): if p['replacement'] is None: result = name[len(p['prefix'])::]", "continue # skip debug related sections stripped = stripped +", "1] cur_entry = block_start continue cur_entry += 1 block_start =", "from file system (see also --sources)', default=[]) parser.add_argument('-w', nargs='?', help='set", "The Emscripten Authors. All rights reserved. # Emscripten is available", "extracts DWARF information encoded in a wasm output produced by", "be # found in the LICENSE file. \"\"\"Utility tools that", "to llvm-dwarfdump executable\") parser.add_argument('--dwarfdump-output', nargs='?', help=argparse.SUPPRESS) return parser.parse_args() class Prefixes:", "+ (b << shift), pos def strip_debug_sections(wasm): logger.debug('Strip debug sections')", "prefix for source map', default=[]) parser.add_argument('-s', '--sources', action='store_true', help='read and", "comp_dir_match is not None else \"\" line_chunk = debug_line_chunks[i +", "'replacement': None}) self.prefixes = prefixes self.cache = {} def resolve(self,", "-= 1 if entries[-1]['address'] == entry['address']: # last entry has", "as infile: source_content = infile.read() sources_content.append(source_content) except IOError: print('Failed to", "128)) + 1 min_live_offset = 1 + fn_size_length # 1", "p, 'replacement': None}) self.prefixes = prefixes self.cache = {} def", "sources_content.append(None) else: source_id = sources_map[source_name] address_delta = address - last_address", "1 byte is for code section entries if fn_start <", "encode_vlq(line_delta) + encode_vlq(column_delta)) last_address = address last_source_id = source_id last_line", "ord(wasm[pos:pos + 1]) pos = pos + 1 while b", "if name.startswith(p['prefix']): if p['replacement'] is None: result = name[len(p['prefix'])::] else:", "else ((-n << 1) + 1) result = \"\" while", "code_section_offset, prefixes, options.sources) with open(options.output, 'w') as outfile: json.dump(map, outfile,", "= \"/Users/yury/Work/junk/sqlite-playground/src\" # file_names[ 1]: # name: \"playground.c\" # dir_index:", "0x000000000000000f 23 3 1 0 0 # 0x0000000000000010 23 3", "to %s' % options.w) with open(options.w, 'wb') as outfile: outfile.write(wasm)", "name_end = name_pos + name_len name = wasm[name_pos:name_end] if name", "resolve(self, name): if name in self.cache: return self.cache[name] result =", "LEB encoded function size (including size field) fn_size_length = floor(log(entries[cur_entry]['address']", "!= 0: logger.error('Error during llvm-dwarfdump execution (%s)' % exit_code) sys.exit(1)", "that output to source maps JSON # - \"load\" is", "= Popen([options.dwarfdump, \"-debug-info\", \"-debug-line\", wasm], stdout=PIPE) output, err = process.communicate()", "sources from file system (see also --sources)', default=[]) parser.add_argument('-w', nargs='?',", "sorted(entries, key=lambda entry: entry['address']) def build_sourcemap(entries, code_section_offset, prefixes, collect_sources): sources", "and encodes it as a wasm source map. 
Additionally, it", "= 1 + fn_size_length # 1 byte is for code", "'rb') as infile: wasm = infile.read() entries = read_dwarf_entries(wasm_input, options)", "llvm-dwarfdump executable\") parser.add_argument('--dwarfdump-output', nargs='?', help=argparse.SUPPRESS) return parser.parse_args() class Prefixes: def", "last_source_id = 0 last_line = 1 last_column = 1 for", "a heuristics to ignore data if the # function starting", "default=[]) parser.add_argument('-w', nargs='?', help='set output wasm file') parser.add_argument('-x', '--strip', action='store_true',", "wasm) if not os.path.exists(options.dwarfdump): logger.error('llvm-dwarfdump not found: ' + options.dwarfdump)", "map') parser.add_argument('-p', '--prefix', nargs='*', help='replace source debug filename prefix for", "entries with line 0 if line == 0: continue #", "entries = read_dwarf_entries(wasm_input, options) code_section_offset = get_code_section_offset(wasm) prefixes = SourceMapPrefixes(sources=Prefixes(options.prefix),", "# skip debug related sections stripped = stripped + wasm[section_start:pos]", "the MIT license and the # University of Illinois/NCSA Open", "wasm file. \"\"\" import argparse from collections import OrderedDict, namedtuple", "description=__doc__) parser.add_argument('wasm', help='wasm file') parser.add_argument('-o', '--output', help='output source map') parser.add_argument('-p',", "encode_uint_var(len(section_content)) + section_content def get_code_section_offset(wasm): logger.debug('Read sections index') pos =", "pos = pos + 1 shift += 7 return n", "return entries sorted by the address field return sorted(entries, key=lambda", "7 return n + (b << shift), pos def strip_debug_sections(wasm):", "that used to load source text SourceMapPrefixes = namedtuple('SourceMapPrefixes', 'sources,", "0 0 end_sequence # 0x0000000000000011 28 0 1 0 0", "+ file.group(2) files[file.group(1)] = file_path for line in re.finditer(r\"\\n0x([0-9a-f]+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)(.*?end_sequence)?\", line_chunk):", "is None: result = name[len(p['prefix'])::] else: result = p['replacement'] +", "== \"sourceMappingURL\" or name.startswith(\"reloc..debug_\") or name.startswith(\".debug_\"): continue # skip debug", "debug info block. 
del entries[block_start:cur_entry + 1] cur_entry = block_start", "found: ' + options.dwarfdump) sys.exit(1) process = Popen([options.dwarfdump, \"-debug-info\", \"-debug-line\",", "byte is for code section entries if fn_start < min_live_offset:", "not None else \"\" line_chunk = debug_line_chunks[i + 1] #", "prefix, 'replacement': replacement}) else: prefixes.append({'prefix': p, 'replacement': None}) self.prefixes =", "0: name_len, name_pos = read_var_uint(wasm, section_body) name_end = name_pos +", "0x00000000 # length: 0x00000000 # # Address Line Column File", "# 1 byte is for code section entries if fn_start", "= {} def resolve(self, name): if name in self.cache: return", "file in re.finditer(r\"file_names\\[\\s*(\\d+)\\]:\\s+name: \\\"([^\\\"]*)\\\"\\s+dir_index: (\\d+)\", line_chunk): dir = include_directories[file.group(3)] file_path", "parse_args() wasm_input = options.wasm with open(wasm_input, 'rb') as infile: wasm", "'--sources', action='store_true', help='read and embed source files from file system", "%s' % wasm) if not os.path.exists(options.dwarfdump): logger.error('llvm-dwarfdump not found: '", "operator entry['address'] -= 1 if entries[-1]['address'] == entry['address']: # last", "help='removes debug and linking sections') parser.add_argument('-u', '--source-map-url', nargs='?', help='specifies sourceMappingURL", "== 0: column = 1 address = entry['address'] + code_section_offset", "separate licenses, the MIT license and the # University of", "log import os import re from subprocess import Popen, PIPE", "asstr logger = logging.getLogger('wasm-sourcemap') def parse_args(): parser = argparse.ArgumentParser(prog='wasm-sourcemap.py', description=__doc__)", "shift), pos def strip_debug_sections(wasm): logger.debug('Strip debug sections') pos = 8", "> 127: result.append(128 | (n & 127)) n = n", "file system (see also --sources)', default=[]) parser.add_argument('-w', nargs='?', help='set output", "= re.search(r\"DW_AT_stmt_list\\s+\\(\" + stmt_list + r\"\\)\\s+\" + r\"DW_AT_comp_dir\\s+\\(\\\"([^\\\"]+)\", maybe_debug_info_content) comp_dir", "to read source: %s' % load_name) sources_content.append(None) else: source_id =", "(n & 127)) n = n >> 7 result.append(n) return", "resolver for file names that are: # - \"sources\" is", "__init__(self, args): prefixes = [] for p in args: if", "== \"linking\" or name == \"sourceMappingURL\" or name.startswith(\"reloc..debug_\") or name.startswith(\".debug_\"):", "def get_code_section_offset(wasm): logger.debug('Read sections index') pos = 8 while pos", "0x0000000000000010 23 3 1 0 0 end_sequence # 0x0000000000000011 28", "wasm to %s' % options.w) with open(options.w, 'wb') as outfile:", "= n | ((b - 128) << shift) b =", "pos = section_body + section_size if section_id == 0: name_len,", "= comp_dir_match.group(1) if comp_dir_match is not None else \"\" line_chunk", "code_section_offset file_name = entry['file'] source_name = prefixes.sources.resolve(file_name) if source_name not", "+ 1) result = \"\" while x > 31: result", "source maps JSON # - \"load\" is for paths that", "block_start continue cur_entry += 1 block_start = cur_entry def read_dwarf_entries(wasm,", "== 0: name_len, name_pos = read_var_uint(wasm, section_body) name_end = name_pos", "options): if options.dwarfdump_output: output = open(options.dwarfdump_output, 'r').read() elif options.dwarfdump: logger.debug('Reading", "while x > 31: result = result + VLQ_CHARS[32 +", "is for paths that used to load source text SourceMapPrefixes", "--dwarfdump-output') sys.exit(1) entries = [] 
debug_line_chunks = re.split(r\"debug_line\\[(0x[0-9a-f]*)\\]\", asstr(output)) maybe_debug_info_content", "# University of Illinois/NCSA Open Source License. Both these licenses", "wasm[name_pos:name_end] if name == \"linking\" or name == \"sourceMappingURL\" or", "= \"sourceMappingURL\" section_content = encode_uint_var(len(section_name)) + section_name + encode_uint_var(len(url)) +", "debug_line_chunks[i] comp_dir_match = re.search(r\"DW_AT_stmt_list\\s+\\(\" + stmt_list + r\"\\)\\s+\" + r\"DW_AT_comp_dir\\s+\\(\\\"([^\\\"]+)\",", "= floor(log(entries[cur_entry]['address'] - fn_start + 1, 128)) + 1 min_live_offset", "# 0x0000000000000010 23 3 1 0 0 end_sequence # 0x0000000000000011", "section contest') parser.add_argument('--dwarfdump', help=\"path to llvm-dwarfdump executable\") parser.add_argument('--dwarfdump-output', nargs='?', help=argparse.SUPPRESS)", "stripped = wasm[:pos] while pos < len(wasm): section_start = pos", "entry has the same address, reusing entries[-1]['eos'] = True else:", "column == 0: column = 1 address = entry['address'] +", "0 last_line = 1 last_column = 1 for entry in", "encoded function size (including size field) fn_size_length = floor(log(entries[cur_entry]['address'] -", "for file in re.finditer(r\"file_names\\[\\s*(\\d+)\\]:\\s+name: \\\"([^\\\"]*)\\\"\\s+dir_index: (\\d+)\", line_chunk): dir = include_directories[file.group(3)]", "def main(): options = parse_args() wasm_input = options.wasm with open(wasm_input,", "1, 128)) + 1 min_live_offset = 1 + fn_size_length #", "reading sources from file system (see also --sources)', default=[]) parser.add_argument('-w',", "min_live_offset = 1 + fn_size_length # 1 byte is for", "------------- # 0x0000000000000006 22 0 1 0 0 is_stmt #", "entries: line = entry['line'] column = entry['column'] # ignore entries", "p in args: if '=' in p: prefix, replacement =", "| ((b - 128) << shift) b = ord(wasm[pos:pos +", "file') parser.add_argument('-o', '--output', help='output source map') parser.add_argument('-p', '--prefix', nargs='*', help='replace", "or name == \"sourceMappingURL\" or name.startswith(\"reloc..debug_\") or name.startswith(\".debug_\"): continue #", "del entries[block_start:cur_entry + 1] cur_entry = block_start continue cur_entry +=", "last END operator entry['address'] -= 1 if entries[-1]['address'] == entry['address']:", "pos + section_size def remove_dead_entries(entries): # Remove entries for dead", "= process.wait() if exit_code != 0: logger.error('Error during llvm-dwarfdump execution", "+ 1 shift += 7 return n + (b <<", "result return result # SourceMapPrefixes contains resolver for file names", "map', default=[]) parser.add_argument('-s', '--sources', action='store_true', help='read and embed source files", "last entry has the same address, reusing entries[-1]['eos'] = True", "re from subprocess import Popen, PIPE import sys sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))", "last_line = 1 last_column = 1 for entry in entries:", "last_source_id = source_id last_line = line last_column = column return", "sources = [] sources_content = [] if collect_sources else None", "x = (n << 1) if n >= 0 else", "p['replacement'] is None: result = name[len(p['prefix'])::] else: result = p['replacement']", "1 min_live_offset = 1 + fn_size_length # 1 byte is", "return result + VLQ_CHARS[x] def read_var_uint(wasm, pos): n = 0", "else '') + file.group(2) files[file.group(1)] = file_path for line in", "sources_map[source_name] address_delta = address - last_address source_id_delta = 
source_id -", "last_column = 1 for entry in entries: line = entry['line']", "entry['address'] + code_section_offset file_name = entry['file'] source_name = prefixes.sources.resolve(file_name) if", "{} last_address = 0 last_source_id = 0 last_line = 1", "skip debug related sections stripped = stripped + wasm[section_start:pos] return", "with line 0 if line == 0: continue # start", "1 0 0 is_stmt prologue_end # 0x000000000000000f 23 3 1", "- 128) << shift) b = ord(wasm[pos:pos + 1]) pos", "None}) self.prefixes = prefixes self.cache = {} def resolve(self, name):", "to the last END operator entry['address'] -= 1 if entries[-1]['address']", "length: 0x00000000 # # Address Line Column File ISA Discriminator", "options.output) map = build_sourcemap(entries, code_section_offset, prefixes, options.sources) with open(options.output, 'w')", "nargs='*', help='replace source debug filename prefix for reading sources from", "= 8 stripped = wasm[:pos] while pos < len(wasm): section_start", "('sources', sources), ('sourcesContent', sources_content), ('mappings', ','.join(mappings))]) def main(): options =", "= wasm[:pos] while pos < len(wasm): section_start = pos section_id,", "+ 1]) pos = pos + 1 shift += 7", "------ ------ --- ------------- ------------- # 0x0000000000000006 22 0 1", "= 8 while pos < len(wasm): section_id, pos_ = read_var_uint(wasm,", "section_name = \"sourceMappingURL\" section_content = encode_uint_var(len(section_name)) + section_name + encode_uint_var(len(url))", "section_size, pos = read_var_uint(wasm, pos_) if section_id == 10: return", "Discriminator Flags # ------------------ ------ ------ ------ --- ------------- -------------", "'eos': line.group(5) is not None} if not entry['eos']: entries.append(entry) else:", "exit_code != 0: logger.error('Error during llvm-dwarfdump execution (%s)' % exit_code)", "options.dwarfdump_output: output = open(options.dwarfdump_output, 'r').read() elif options.dwarfdump: logger.debug('Reading DWARF information", "two separate licenses, the MIT license and the # University", "23 10 1 0 0 is_stmt prologue_end # 0x000000000000000f 23", "= \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\" x = (n << 1) if n >=", "sys.exit(1) else: logger.error('Please specify either --dwarfdump or --dwarfdump-output') sys.exit(1) entries", "Copyright 2018 The Emscripten Authors. All rights reserved. # Emscripten", "Illinois/NCSA Open Source License. Both these licenses can be #", "{'address': int(line.group(1), 16), 'line': int(line.group(2)), 'column': int(line.group(3)), 'file': files[line.group(4)], 'eos':", "entry['line'] column = entry['column'] # ignore entries with line 0", "logging.getLogger('wasm-sourcemap') def parse_args(): parser = argparse.ArgumentParser(prog='wasm-sourcemap.py', description=__doc__) parser.add_argument('wasm', help='wasm file')", "outfile, separators=(',', ':')) if options.strip: wasm = strip_debug_sections(wasm) if options.source_map_url:", "= entry['column'] # ignore entries with line 0 if line", "= {'address': int(line.group(1), 16), 'line': int(line.group(2)), 'column': int(line.group(3)), 'file': files[line.group(4)],", "cur_entry < len(entries): if not entries[cur_entry]['eos']: cur_entry += 1 continue", "line - last_line column_delta = column - last_column mappings.append(encode_vlq(address_delta) +", "of Illinois/NCSA Open Source License. Both these licenses can be", "def remove_dead_entries(entries): # Remove entries for dead functions. 
It is", "debug filename prefix for reading sources from file system (see", "help='read and embed source files from file system into source", "sections index') pos = 8 while pos < len(wasm): section_id,", "n = 0 shift = 0 b = ord(wasm[pos:pos +", "= debug_line_chunks[i] comp_dir_match = re.search(r\"DW_AT_stmt_list\\s+\\(\" + stmt_list + r\"\\)\\s+\" +", "prefix for reading sources from file system (see also --sources)',", "2): stmt_list = debug_line_chunks[i] comp_dir_match = re.search(r\"DW_AT_stmt_list\\s+\\(\" + stmt_list +", "0 if line == 0: continue # start at least", "from file system into source map') parser.add_argument('-l', '--load-prefix', nargs='*', help='replace", "= p['replacement'] + name[len(p['prefix'])::] break self.cache[name] = result return result", "the LEB encoded function size (including size field) fn_size_length =", "prefixes, collect_sources): sources = [] sources_content = [] if collect_sources", "line_chunk): include_directories[dir.group(1)] = dir.group(2) files = {} for file in", "prefixes = SourceMapPrefixes(sources=Prefixes(options.prefix), load=Prefixes(options.load_prefix)) logger.debug('Saving to %s' % options.output) map", "= [] sources_content = [] if collect_sources else None mappings", "pos_ = read_var_uint(wasm, pos) section_size, pos = read_var_uint(wasm, pos_) if", "debug filename prefix for source map', default=[]) parser.add_argument('-s', '--sources', action='store_true',", "section_size if section_id == 0: name_len, name_pos = read_var_uint(wasm, section_body)", "(is equal to its size field length). block_start = 0", "= 0 while cur_entry < len(entries): if not entries[cur_entry]['eos']: cur_entry", "(dir + '/' if file.group(2)[0] != '/' else '') +", "of function to the last END operator entry['address'] -= 1", "VLQ_CHARS = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\" x = (n << 1) if n", "options.w) with open(options.w, 'wb') as outfile: outfile.write(wasm) logger.debug('Done') return 0", "\"sourceMappingURL\" or name.startswith(\"reloc..debug_\") or name.startswith(\".debug_\"): continue # skip debug related", "if name == \"linking\" or name == \"sourceMappingURL\" or name.startswith(\"reloc..debug_\")", "that extracts DWARF information encoded in a wasm output produced", "last_line = line last_column = column return OrderedDict([('version', 3), ('names',", "#!/usr/bin/env python # Copyright 2018 The Emscripten Authors. All rights", "license and the # University of Illinois/NCSA Open Source License.", "0 0 is_stmt # 0x0000000000000007 23 10 1 0 0", "linking sections') parser.add_argument('-u', '--source-map-url', nargs='?', help='specifies sourceMappingURL section contest') parser.add_argument('--dwarfdump',", "0 else ((-n << 1) + 1) result = \"\"", "map') parser.add_argument('-l', '--load-prefix', nargs='*', help='replace source debug filename prefix for", "if options.w: logger.debug('Saving wasm to %s' % options.w) with open(options.w,", "in self.prefixes: if name.startswith(p['prefix']): if p['replacement'] is None: result =", "- \"sources\" is for names that output to source maps", "exit_code = process.wait() if exit_code != 0: logger.error('Error during llvm-dwarfdump", "name_len, name_pos = read_var_uint(wasm, section_body) name_end = name_pos + name_len", "name for p in self.prefixes: if name.startswith(p['prefix']): if p['replacement'] is", "dead code debug info block. 
del entries[block_start:cur_entry + 1] cur_entry", "0 0 is_stmt include_directories = {'0': comp_dir} for dir in", "parser.add_argument('-s', '--sources', action='store_true', help='read and embed source files from file", "ISA Discriminator Flags # ------------------ ------ ------ ------ --- -------------", "n | ((b - 128) << shift) b = ord(wasm[pos:pos", "and embed source files from file system into source map')", "n = n >> 7 result.append(n) return bytes(result) def append_source_mapping(wasm,", "127: result.append(128 | (n & 127)) n = n >>", "\"-debug-line\", wasm], stdout=PIPE) output, err = process.communicate() exit_code = process.wait()", "+ r\"\\)\\s+\" + r\"DW_AT_comp_dir\\s+\\(\\\"([^\\\"]+)\", maybe_debug_info_content) comp_dir = comp_dir_match.group(1) if comp_dir_match", "1] = \"/Users/yury/Work/junk/sqlite-playground/src\" # file_names[ 1]: # name: \"playground.c\" #", "pos) section_size, section_body = read_var_uint(wasm, pos_) pos = section_body +", "in sources_map: source_id = len(sources) sources_map[source_name] = source_id sources.append(source_name) if", "# Calculate the LEB encoded function size (including size field)", "help=argparse.SUPPRESS) return parser.parse_args() class Prefixes: def __init__(self, args): prefixes =", "= source_id - last_source_id line_delta = line - last_line column_delta", "= name_pos + name_len name = wasm[name_pos:name_end] if name ==", "as infile: wasm = infile.read() entries = read_dwarf_entries(wasm_input, options) code_section_offset", "section_body = read_var_uint(wasm, pos_) pos = section_body + section_size if", "if p['replacement'] is None: result = name[len(p['prefix'])::] else: result =", "section_content def get_code_section_offset(wasm): logger.debug('Read sections index') pos = 8 while", "function starting address near to 0 (is equal to its", "cur_entry += 1 block_start = cur_entry def read_dwarf_entries(wasm, options): if", "\\\"([^\\\"]*)\\\"\\s+dir_index: (\\d+)\", line_chunk): dir = include_directories[file.group(3)] file_path = (dir +", "def build_sourcemap(entries, code_section_offset, prefixes, collect_sources): sources = [] sources_content =", "collections import OrderedDict, namedtuple import json import logging from math", "= bytearray() while n > 127: result.append(128 | (n &", "+= 7 return n + (b << shift), pos def", "+ fn_size_length # 1 byte is for code section entries", "logger.error('Please specify either --dwarfdump or --dwarfdump-output') sys.exit(1) entries = []", "code_section_offset = get_code_section_offset(wasm) prefixes = SourceMapPrefixes(sources=Prefixes(options.prefix), load=Prefixes(options.load_prefix)) logger.debug('Saving to %s'", "It is a heuristics to ignore data if the #", "# - \"load\" is for paths that used to load", "entry['address']) def build_sourcemap(entries, code_section_offset, prefixes, collect_sources): sources = [] sources_content", "contest') parser.add_argument('--dwarfdump', help=\"path to llvm-dwarfdump executable\") parser.add_argument('--dwarfdump-output', nargs='?', help=argparse.SUPPRESS) return", "= read_dwarf_entries(wasm_input, options) code_section_offset = get_code_section_offset(wasm) prefixes = SourceMapPrefixes(sources=Prefixes(options.prefix), load=Prefixes(options.load_prefix))", "options.source_map_url: wasm = append_source_mapping(wasm, options.source_map_url) if options.w: logger.debug('Saving wasm to", "# dir_index: 1 # mod_time: 0x00000000 # length: 0x00000000 #", "wasm_input = options.wasm with open(wasm_input, 'rb') as infile: wasm =", 
"mappings.append(encode_vlq(address_delta) + encode_vlq(source_id_delta) + encode_vlq(line_delta) + encode_vlq(column_delta)) last_address = address", "last_address source_id_delta = source_id - last_source_id line_delta = line -", "source_content = infile.read() sources_content.append(source_content) except IOError: print('Failed to read source:", "in the LICENSE file. \"\"\"Utility tools that extracts DWARF information", "1] # include_directories[ 1] = \"/Users/yury/Work/junk/sqlite-playground/src\" # file_names[ 1]: #", "infile: source_content = infile.read() sources_content.append(source_content) except IOError: print('Failed to read", "+ encode_vlq(column_delta)) last_address = address last_source_id = source_id last_line =", "+ name[len(p['prefix'])::] break self.cache[name] = result return result # SourceMapPrefixes", "strip debug sections from a wasm file. \"\"\" import argparse", "import argparse from collections import OrderedDict, namedtuple import json import", "parser = argparse.ArgumentParser(prog='wasm-sourcemap.py', description=__doc__) parser.add_argument('wasm', help='wasm file') parser.add_argument('-o', '--output', help='output", "\"sourceMappingURL\" section_content = encode_uint_var(len(section_name)) + section_name + encode_uint_var(len(url)) + url", "pos + 1 while b >= 128: n = n", "while pos < len(wasm): section_id, pos_ = read_var_uint(wasm, pos) section_size,", "namedtuple import json import logging from math import floor, log", "namedtuple('SourceMapPrefixes', 'sources, load') def encode_vlq(n): VLQ_CHARS = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\" x =", "argparse from collections import OrderedDict, namedtuple import json import logging", "n > 127: result.append(128 | (n & 127)) n =", "entry in entries: line = entry['line'] column = entry['column'] #", "1]) pos = pos + 1 while b >= 128:", "not None} if not entry['eos']: entries.append(entry) else: # move end", "- last_line column_delta = column - last_column mappings.append(encode_vlq(address_delta) + encode_vlq(source_id_delta)", "','.join(mappings))]) def main(): options = parse_args() wasm_input = options.wasm with", "original sources, change files prefixes, and strip debug sections from", "name in self.cache: return self.cache[name] result = name for p", "def encode_vlq(n): VLQ_CHARS = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\" x = (n << 1)", "if not entry['eos']: entries.append(entry) else: # move end of function", "logger = logging.getLogger('wasm-sourcemap') def parse_args(): parser = argparse.ArgumentParser(prog='wasm-sourcemap.py', description=__doc__) parser.add_argument('wasm',", "if not entries[cur_entry]['eos']: cur_entry += 1 continue fn_start = entries[block_start]['address']", "# length: 0x00000000 # # Address Line Column File ISA", "+ encode_vlq(source_id_delta) + encode_vlq(line_delta) + encode_vlq(column_delta)) last_address = address last_source_id", "from math import floor, log import os import re from", "size (including size field) fn_size_length = floor(log(entries[cur_entry]['address'] - fn_start +", "source map. Additionally, it can collect original sources, change files", "# start at least at column 1 if column ==", "least at column 1 if column == 0: column =", "Authors. All rights reserved. 
# Emscripten is available under two", "= re.split(r\"debug_line\\[(0x[0-9a-f]*)\\]\", asstr(output)) maybe_debug_info_content = debug_line_chunks[0] for i in range(1,", "debug and linking sections') parser.add_argument('-u', '--source-map-url', nargs='?', help='specifies sourceMappingURL section", "name_pos + name_len name = wasm[name_pos:name_end] if name == \"linking\"", "in re.finditer(r\"include_directories\\[\\s*(\\d+)\\] = \\\"([^\\\"]*)\", line_chunk): include_directories[dir.group(1)] = dir.group(2) files =", "SourceMapPrefixes contains resolver for file names that are: # -", "import asstr logger = logging.getLogger('wasm-sourcemap') def parse_args(): parser = argparse.ArgumentParser(prog='wasm-sourcemap.py',", "= SourceMapPrefixes(sources=Prefixes(options.prefix), load=Prefixes(options.load_prefix)) logger.debug('Saving to %s' % options.output) map =", "% options.output) map = build_sourcemap(entries, code_section_offset, prefixes, options.sources) with open(options.output,", "options = parse_args() wasm_input = options.wasm with open(wasm_input, 'rb') as", "+ 1 while b >= 128: n = n |", "= pos + 1 shift += 7 return n +", "file system into source map') parser.add_argument('-l', '--load-prefix', nargs='*', help='replace source", "equal to its size field length). block_start = 0 cur_entry", ">= 0 else ((-n << 1) + 1) result =", "i in range(1, len(debug_line_chunks), 2): stmt_list = debug_line_chunks[i] comp_dir_match =", "re.finditer(r\"include_directories\\[\\s*(\\d+)\\] = \\\"([^\\\"]*)\", line_chunk): include_directories[dir.group(1)] = dir.group(2) files = {}", "logger.error('llvm-dwarfdump not found: ' + options.dwarfdump) sys.exit(1) process = Popen([options.dwarfdump,", "is not None} if not entry['eos']: entries.append(entry) else: # move", "mod_time: 0x00000000 # length: 0x00000000 # # Address Line Column", "entries[-1]['address'] == entry['address']: # last entry has the same address,", "fn_size_length # 1 byte is for code section entries if", "+ 1 min_live_offset = 1 + fn_size_length # 1 byte", "also --sources)', default=[]) parser.add_argument('-w', nargs='?', help='set output wasm file') parser.add_argument('-x',", "a wasm output produced by the LLVM tools, and encodes", "block_start = 0 cur_entry = 0 while cur_entry < len(entries):", "line_delta = line - last_line column_delta = column - last_column", "16), 'line': int(line.group(2)), 'column': int(line.group(3)), 'file': files[line.group(4)], 'eos': line.group(5) is", "is_stmt prologue_end # 0x000000000000000f 23 3 1 0 0 #", "comp_dir_match = re.search(r\"DW_AT_stmt_list\\s+\\(\" + stmt_list + r\"\\)\\s+\" + r\"DW_AT_comp_dir\\s+\\(\\\"([^\\\"]+)\", maybe_debug_info_content)", "= 0 last_source_id = 0 last_line = 1 last_column =", "it as a wasm source map. 
Additionally, it can collect", "n = n | ((b - 128) << shift) b", "= column - last_column mappings.append(encode_vlq(address_delta) + encode_vlq(source_id_delta) + encode_vlq(line_delta) +", "self.cache[name] result = name for p in self.prefixes: if name.startswith(p['prefix']):", "default=[]) parser.add_argument('-s', '--sources', action='store_true', help='read and embed source files from", "subprocess import Popen, PIPE import sys sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from tools.shared", "source map', default=[]) parser.add_argument('-s', '--sources', action='store_true', help='read and embed source", "31)] x = x >> 5 return result + VLQ_CHARS[x]", "help='wasm file') parser.add_argument('-o', '--output', help='output source map') parser.add_argument('-p', '--prefix', nargs='*',", "are: # - \"sources\" is for names that output to", "# mod_time: 0x00000000 # length: 0x00000000 # # Address Line", "import Popen, PIPE import sys sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from tools.shared import", "= (n << 1) if n >= 0 else ((-n", "these licenses can be # found in the LICENSE file.", "sources), ('sourcesContent', sources_content), ('mappings', ','.join(mappings))]) def main(): options = parse_args()", "comp_dir_match.group(1) if comp_dir_match is not None else \"\" line_chunk =", "line_chunk = debug_line_chunks[i + 1] # include_directories[ 1] = \"/Users/yury/Work/junk/sqlite-playground/src\"", "return wasm + encode_uint_var(0) + encode_uint_var(len(section_content)) + section_content def get_code_section_offset(wasm):", "source map') parser.add_argument('-l', '--load-prefix', nargs='*', help='replace source debug filename prefix", "re.search(r\"DW_AT_stmt_list\\s+\\(\" + stmt_list + r\"\\)\\s+\" + r\"DW_AT_comp_dir\\s+\\(\\\"([^\\\"]+)\", maybe_debug_info_content) comp_dir =", "'--prefix', nargs='*', help='replace source debug filename prefix for source map',", "'=' in p: prefix, replacement = p.split('=') prefixes.append({'prefix': prefix, 'replacement':", "get_code_section_offset(wasm): logger.debug('Read sections index') pos = 8 while pos <", "0 is_stmt include_directories = {'0': comp_dir} for dir in re.finditer(r\"include_directories\\[\\s*(\\d+)\\]", "cur_entry def read_dwarf_entries(wasm, options): if options.dwarfdump_output: output = open(options.dwarfdump_output, 'r').read()", "+ 1] # include_directories[ 1] = \"/Users/yury/Work/junk/sqlite-playground/src\" # file_names[ 1]:", "the # University of Illinois/NCSA Open Source License. 
Both these", "address_delta = address - last_address source_id_delta = source_id - last_source_id", "pos_) pos = section_body + section_size if section_id == 0:", "0 1 0 0 is_stmt include_directories = {'0': comp_dir} for", "stdout=PIPE) output, err = process.communicate() exit_code = process.wait() if exit_code", "= {'0': comp_dir} for dir in re.finditer(r\"include_directories\\[\\s*(\\d+)\\] = \\\"([^\\\"]*)\", line_chunk):", "None} if not entry['eos']: entries.append(entry) else: # move end of", "tools that extracts DWARF information encoded in a wasm output", "while cur_entry < len(entries): if not entries[cur_entry]['eos']: cur_entry += 1", "logger.debug('Saving wasm to %s' % options.w) with open(options.w, 'wb') as", "read_var_uint(wasm, pos_) pos = section_body + section_size if section_id ==", "is_stmt include_directories = {'0': comp_dir} for dir in re.finditer(r\"include_directories\\[\\s*(\\d+)\\] =", "wasm + encode_uint_var(0) + encode_uint_var(len(section_content)) + section_content def get_code_section_offset(wasm): logger.debug('Read", "0x0000000000000011 28 0 1 0 0 is_stmt include_directories = {'0':", "1 + fn_size_length # 1 byte is for code section", "7 result.append(n) return bytes(result) def append_source_mapping(wasm, url): logger.debug('Append sourceMappingURL section')", "\"\"\" import argparse from collections import OrderedDict, namedtuple import json", "fn_start < min_live_offset: # Remove dead code debug info block.", "asstr(output)) maybe_debug_info_content = debug_line_chunks[0] for i in range(1, len(debug_line_chunks), 2):", "# found in the LICENSE file. \"\"\"Utility tools that extracts", "bytearray() while n > 127: result.append(128 | (n & 127))", "# function starting address near to 0 (is equal to", "entry['address'] -= 1 if entries[-1]['address'] == entry['address']: # last entry", "'sources, load') def encode_vlq(n): VLQ_CHARS = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\" x = (n", "debug sections') pos = 8 stripped = wasm[:pos] while pos", "else \"\" line_chunk = debug_line_chunks[i + 1] # include_directories[ 1]", "cur_entry = 0 while cur_entry < len(entries): if not entries[cur_entry]['eos']:", "= 0 cur_entry = 0 while cur_entry < len(entries): if", "+ encode_vlq(line_delta) + encode_vlq(column_delta)) last_address = address last_source_id = source_id", "+ 1]) pos = pos + 1 while b >=", "for p in args: if '=' in p: prefix, replacement", "section_content = encode_uint_var(len(section_name)) + section_name + encode_uint_var(len(url)) + url return", "def append_source_mapping(wasm, url): logger.debug('Append sourceMappingURL section') section_name = \"sourceMappingURL\" section_content", "return 0 if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG if os.environ.get('EMCC_DEBUG') else", "in self.cache: return self.cache[name] result = name for p in", "%s' % options.output) map = build_sourcemap(entries, code_section_offset, prefixes, options.sources) with", "= read_var_uint(wasm, section_body) name_end = name_pos + name_len name =", "encode_uint_var(len(section_name)) + section_name + encode_uint_var(len(url)) + url return wasm +", "include_directories[dir.group(1)] = dir.group(2) files = {} for file in re.finditer(r\"file_names\\[\\s*(\\d+)\\]:\\s+name:", "entry['column'] # ignore entries with line 0 if line ==", "<< shift) b = ord(wasm[pos:pos + 1]) pos = pos", "names that are: # - \"sources\" is for names that", "include_directories[ 1] = \"/Users/yury/Work/junk/sqlite-playground/src\" # file_names[ 1]: # name: 
\"playground.c\"", "< min_live_offset: # Remove dead code debug info block. del", "0 1 0 0 is_stmt # 0x0000000000000007 23 10 1", "try: with open(load_name, 'r') as infile: source_content = infile.read() sources_content.append(source_content)", "= wasm[name_pos:name_end] if name == \"linking\" or name == \"sourceMappingURL\"", "' + options.dwarfdump) sys.exit(1) process = Popen([options.dwarfdump, \"-debug-info\", \"-debug-line\", wasm],", "same address, reusing entries[-1]['eos'] = True else: entries.append(entry) remove_dead_entries(entries) #", "section_body) name_end = name_pos + name_len name = wasm[name_pos:name_end] if", "x > 31: result = result + VLQ_CHARS[32 + (x", "int(line.group(3)), 'file': files[line.group(4)], 'eos': line.group(5) is not None} if not", "process.communicate() exit_code = process.wait() if exit_code != 0: logger.error('Error during", "read_dwarf_entries(wasm_input, options) code_section_offset = get_code_section_offset(wasm) prefixes = SourceMapPrefixes(sources=Prefixes(options.prefix), load=Prefixes(options.load_prefix)) logger.debug('Saving", "process.wait() if exit_code != 0: logger.error('Error during llvm-dwarfdump execution (%s)'", "for reading sources from file system (see also --sources)', default=[])", "its size field length). block_start = 0 cur_entry = 0", "size field length). block_start = 0 cur_entry = 0 while", "+= 1 continue fn_start = entries[block_start]['address'] # Calculate the LEB", "((b - 128) << shift) b = ord(wasm[pos:pos + 1])", "section entries if fn_start < min_live_offset: # Remove dead code", "options.dwarfdump: logger.debug('Reading DWARF information from %s' % wasm) if not", "% exit_code) sys.exit(1) else: logger.error('Please specify either --dwarfdump or --dwarfdump-output')", "in entries: line = entry['line'] column = entry['column'] # ignore", "- last_column mappings.append(encode_vlq(address_delta) + encode_vlq(source_id_delta) + encode_vlq(line_delta) + encode_vlq(column_delta)) last_address", "source_id_delta = source_id - last_source_id line_delta = line - last_line", "the LICENSE file. 
\"\"\"Utility tools that extracts DWARF information encoded", "+ 1] cur_entry = block_start continue cur_entry += 1 block_start", "< len(wasm): section_id, pos_ = read_var_uint(wasm, pos) section_size, pos =", "= len(sources) sources_map[source_name] = source_id sources.append(source_name) if collect_sources: load_name =", "sources_content), ('mappings', ','.join(mappings))]) def main(): options = parse_args() wasm_input =", "source debug filename prefix for source map', default=[]) parser.add_argument('-s', '--sources',", "('mappings', ','.join(mappings))]) def main(): options = parse_args() wasm_input = options.wasm", "the same address, reusing entries[-1]['eos'] = True else: entries.append(entry) remove_dead_entries(entries)", "source_id = sources_map[source_name] address_delta = address - last_address source_id_delta =", "= parse_args() wasm_input = options.wasm with open(wasm_input, 'rb') as infile:", "to load source text SourceMapPrefixes = namedtuple('SourceMapPrefixes', 'sources, load') def", "section_id == 0: name_len, name_pos = read_var_uint(wasm, section_body) name_end =", "infile.read() entries = read_dwarf_entries(wasm_input, options) code_section_offset = get_code_section_offset(wasm) prefixes =", "return n + (b << shift), pos def strip_debug_sections(wasm): logger.debug('Strip", "or name.startswith(\".debug_\"): continue # skip debug related sections stripped =", "--dwarfdump or --dwarfdump-output') sys.exit(1) entries = [] debug_line_chunks = re.split(r\"debug_line\\[(0x[0-9a-f]*)\\]\",", "entries[-1]['eos'] = True else: entries.append(entry) remove_dead_entries(entries) # return entries sorted", "= [] for p in args: if '=' in p:", ">> 5 return result + VLQ_CHARS[x] def read_var_uint(wasm, pos): n", "= 0 b = ord(wasm[pos:pos + 1]) pos = pos", "outfile: outfile.write(wasm) logger.debug('Done') return 0 if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG", "# 0x0000000000000007 23 10 1 0 0 is_stmt prologue_end #", "= debug_line_chunks[0] for i in range(1, len(debug_line_chunks), 2): stmt_list =", "entries sorted by the address field return sorted(entries, key=lambda entry:", "def encode_uint_var(n): result = bytearray() while n > 127: result.append(128", "= \"\" while x > 31: result = result +", "build_sourcemap(entries, code_section_offset, prefixes, collect_sources): sources = [] sources_content = []", "at least at column 1 if column == 0: column", "source_id sources.append(source_name) if collect_sources: load_name = prefixes.load.resolve(file_name) try: with open(load_name,", "available under two separate licenses, the MIT license and the", "wasm output produced by the LLVM tools, and encodes it", "(see also --sources)', default=[]) parser.add_argument('-w', nargs='?', help='set output wasm file')", "in args: if '=' in p: prefix, replacement = p.split('=')", "stripped + wasm[section_start:pos] return stripped def encode_uint_var(n): result = bytearray()", "= \\\"([^\\\"]*)\", line_chunk): include_directories[dir.group(1)] = dir.group(2) files = {} for", "wasm = strip_debug_sections(wasm) if options.source_map_url: wasm = append_source_mapping(wasm, options.source_map_url) if", "LLVM tools, and encodes it as a wasm source map.", "is available under two separate licenses, the MIT license and", "entries = [] debug_line_chunks = re.split(r\"debug_line\\[(0x[0-9a-f]*)\\]\", asstr(output)) maybe_debug_info_content = debug_line_chunks[0]", "= sources_map[source_name] address_delta = address - last_address source_id_delta = source_id", "= ord(wasm[pos:pos + 1]) 
pos = pos + 1 shift", ">> 7 result.append(n) return bytes(result) def append_source_mapping(wasm, url): logger.debug('Append sourceMappingURL", "output = open(options.dwarfdump_output, 'r').read() elif options.dwarfdump: logger.debug('Reading DWARF information from", "wasm[:pos] while pos < len(wasm): section_start = pos section_id, pos_", "+ r\"DW_AT_comp_dir\\s+\\(\\\"([^\\\"]+)\", maybe_debug_info_content) comp_dir = comp_dir_match.group(1) if comp_dir_match is not", "if entries[-1]['address'] == entry['address']: # last entry has the same", "at column 1 if column == 0: column = 1", "------ ------ ------ --- ------------- ------------- # 0x0000000000000006 22 0", "outfile.write(wasm) logger.debug('Done') return 0 if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG if", "= column return OrderedDict([('version', 3), ('names', []), ('sources', sources), ('sourcesContent',", "line = entry['line'] column = entry['column'] # ignore entries with", "err = process.communicate() exit_code = process.wait() if exit_code != 0:", "OrderedDict, namedtuple import json import logging from math import floor,", "MIT license and the # University of Illinois/NCSA Open Source", "'--load-prefix', nargs='*', help='replace source debug filename prefix for reading sources", "the last END operator entry['address'] -= 1 if entries[-1]['address'] ==", "options.sources) with open(options.output, 'w') as outfile: json.dump(map, outfile, separators=(',', ':'))", "cur_entry = block_start continue cur_entry += 1 block_start = cur_entry", "return pos pos = pos + section_size def remove_dead_entries(entries): #", "sources_map = {} last_address = 0 last_source_id = 0 last_line", "1) result = \"\" while x > 31: result =", "section_name + encode_uint_var(len(url)) + url return wasm + encode_uint_var(0) +", "(%s)' % exit_code) sys.exit(1) else: logger.error('Please specify either --dwarfdump or", "re.finditer(r\"file_names\\[\\s*(\\d+)\\]:\\s+name: \\\"([^\\\"]*)\\\"\\s+dir_index: (\\d+)\", line_chunk): dir = include_directories[file.group(3)] file_path = (dir", "Remove dead code debug info block. del entries[block_start:cur_entry + 1]", "help='set output wasm file') parser.add_argument('-x', '--strip', action='store_true', help='removes debug and", "remove_dead_entries(entries): # Remove entries for dead functions. It is a", "options.wasm with open(wasm_input, 'rb') as infile: wasm = infile.read() entries", "parse_args(): parser = argparse.ArgumentParser(prog='wasm-sourcemap.py', description=__doc__) parser.add_argument('wasm', help='wasm file') parser.add_argument('-o', '--output',", "3 1 0 0 end_sequence # 0x0000000000000011 28 0 1", "= 1 address = entry['address'] + code_section_offset file_name = entry['file']", "encodes it as a wasm source map. 
Additionally, it can", "if name in self.cache: return self.cache[name] result = name for", "the # function starting address near to 0 (is equal", "Line Column File ISA Discriminator Flags # ------------------ ------ ------", "# Emscripten is available under two separate licenses, the MIT", "# SourceMapPrefixes contains resolver for file names that are: #", "information encoded in a wasm output produced by the LLVM", "encode_vlq(n): VLQ_CHARS = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\" x = (n << 1) if", "nargs='?', help=argparse.SUPPRESS) return parser.parse_args() class Prefixes: def __init__(self, args): prefixes", "shift += 7 return n + (b << shift), pos", "section_id, pos_ = read_var_uint(wasm, pos) section_size, section_body = read_var_uint(wasm, pos_)", "= n >> 7 result.append(n) return bytes(result) def append_source_mapping(wasm, url):", "pos_) if section_id == 10: return pos pos = pos", "is a heuristics to ignore data if the # function", "= namedtuple('SourceMapPrefixes', 'sources, load') def encode_vlq(n): VLQ_CHARS = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\" x", "to ignore data if the # function starting address near", "= line - last_line column_delta = column - last_column mappings.append(encode_vlq(address_delta)", "!= '/' else '') + file.group(2) files[file.group(1)] = file_path for" ]
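
# ---------------------------------------------------------------------------
# A minimal, hypothetical sanity check for the encoding helpers above; it is
# not part of the original tool. It assumes it runs in the same module as
# encode_vlq, encode_uint_var and read_var_uint (the dashed filename makes a
# normal import awkward). The expected strings follow the standard source-map
# base64 VLQ alphabet; build_sourcemap relies on this delta-plus-VLQ scheme to
# keep the "mappings" string compact. 624485 is the classic LEB128 example.
def _sanity_check():
  assert encode_vlq(0) == 'A'    # 0 -> bit pattern 0 -> 'A'
  assert encode_vlq(1) == 'C'    # positive n is shifted left: 1 -> 2 -> 'C'
  assert encode_vlq(-1) == 'D'   # negative n sets the sign bit: -1 -> 3 -> 'D'
  assert encode_vlq(16) == 'gB'  # 16 -> 32, which needs a continuation digit

  # encode_uint_var and read_var_uint are inverse LEB128 operations.
  payload = encode_uint_var(624485)
  assert payload == b'\xe5\x8e&'
  value, end = read_var_uint(payload, 0)
  assert (value, end) == (624485, 3)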